diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/archs/srvgg_arch.py b/spaces/17TheWord/RealESRGAN/realesrgan/archs/srvgg_arch.py
deleted file mode 100644
index 39460965c9c5ee9cd6eb41c50d33574cb8ba6e50..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/realesrgan/archs/srvgg_arch.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from basicsr.utils.registry import ARCH_REGISTRY
-from torch import nn as nn
-from torch.nn import functional as F
-
-
-@ARCH_REGISTRY.register()
-class SRVGGNetCompact(nn.Module):
-    """A compact VGG-style network structure for super-resolution.
-
-    It is a compact network structure, which performs upsampling in the last layer and no convolution is
-    conducted on the HR feature space.
-
-    Args:
-        num_in_ch (int): Channel number of inputs. Default: 3.
-        num_out_ch (int): Channel number of outputs. Default: 3.
-        num_feat (int): Channel number of intermediate features. Default: 64.
-        num_conv (int): Number of convolution layers in the body network. Default: 16.
-        upscale (int): Upsampling factor. Default: 4.
-        act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu.
-    """
-
-    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
-        super(SRVGGNetCompact, self).__init__()
-        self.num_in_ch = num_in_ch
-        self.num_out_ch = num_out_ch
-        self.num_feat = num_feat
-        self.num_conv = num_conv
-        self.upscale = upscale
-        self.act_type = act_type
-
-        self.body = nn.ModuleList()
-        # the first conv
-        self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1))
-        # the first activation
-        if act_type == 'relu':
-            activation = nn.ReLU(inplace=True)
-        elif act_type == 'prelu':
-            activation = nn.PReLU(num_parameters=num_feat)
-        elif act_type == 'leakyrelu':
-            activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
-        self.body.append(activation)
-
-        # the body structure
-        for _ in range(num_conv):
-            self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1))
-            # activation
-            if act_type == 'relu':
-                activation = nn.ReLU(inplace=True)
-            elif act_type == 'prelu':
-                activation = nn.PReLU(num_parameters=num_feat)
-            elif act_type == 'leakyrelu':
-                activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
-            self.body.append(activation)
-
-        # the last conv
-        self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1))
-        # upsample
-        self.upsampler = nn.PixelShuffle(upscale)
-
-    def forward(self, x):
-        out = x
-        for i in range(0, len(self.body)):
-            out = self.body[i](out)
-
-        out = self.upsampler(out)
-        # add the nearest upsampled image, so that the network learns the residual
-        base = F.interpolate(x, scale_factor=self.upscale, mode='nearest')
-        out += base
-        return out
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 The Ultimate Guide for Navigation and Entertainment.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 The Ultimate Guide for Navigation and Entertainment.md
deleted file mode 100644
index 9be5a51742471ae9e9b9d4dc1c095d4b738fa0d2..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 The Ultimate Guide for Navigation and Entertainment.md
+++ /dev/null
@@ -1,139 +0,0 @@
--
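Editor's note on the first deleted file above (`realesrgan/archs/srvgg_arch.py`): the snippet below is not part of the diff. It is a minimal usage sketch of the removed `SRVGGNetCompact` class, assuming `torch` and `basicsr` are installed and that the module is still importable under the package path this repository used before the deletion.

```python
# Minimal usage sketch for the deleted SRVGGNetCompact architecture.
# Assumes torch and basicsr are installed and that the module path below exists.
import torch
from realesrgan.archs.srvgg_arch import SRVGGNetCompact

# Instantiate with the class defaults: 3-channel RGB in/out, 4x upscaling.
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64,
                        num_conv=16, upscale=4, act_type='prelu')
model.eval()

# A dummy 64x64 low-resolution image (N, C, H, W).
lr = torch.rand(1, 3, 64, 64)
with torch.no_grad():
    sr = model(lr)  # PixelShuffle upsampling plus a nearest-neighbor residual

print(sr.shape)  # expected: torch.Size([1, 3, 256, 256])
```

In the Real-ESRGAN codebase this class is normally constructed indirectly through `ARCH_REGISTRY` from a basicsr configuration; direct instantiation as above is only a quick way to inspect input and output shapes.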
-

DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46

-

If you are looking for a high-quality navigation system for your car, you might be interested in the DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46. This is a DVD-based map update for the KENWOOD DV3200 navigation system, which is manufactured by DENSO, a leading supplier of automotive technology. In this article, we will review this product and tell you everything you need to know about it.

-

DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46


Download File https://byltly.com/2uKyg0



-

Introduction

-

What is DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46?

-

DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 is a map update disc that contains the latest data for the KENWOOD DV3200 navigation system. This system is a DVD-based navigation system that uses a touch screen display and a remote control to provide you with turn-by-turn directions, voice guidance, and various other features. The system can also play DVDs, CDs, MP3s, and other media formats.

-

Why do you need it?

-

You need this map update disc if you want to enjoy the best performance and accuracy of your KENWOOD DV3200 navigation system. The disc contains the most recent map data for Europe, including countries such as France, Germany, Italy, Spain, UK, and more. It also includes thousands of points of interest, such as gas stations, restaurants, hotels, attractions, and more. By updating your map data, you can avoid getting lost, save time and fuel, and discover new places.

-

How to install it?

-

Installing the map update disc is very easy and straightforward. All you need to do is insert the disc into your DVD player in your car and follow the on-screen instructions. The update process will take about 15 minutes to complete. You can then eject the disc and enjoy your updated navigation system.

-

Features and Benefits

-

High-quality navigation system

-

The KENWOOD DV3200 navigation system is a high-quality navigation system that offers you many features and benefits. Some of them are:

- -

Compatible with various models of cars

-

The KENWOOD DV3200 navigation system is compatible with various models of cars that have a DVD player and a screen in their dashboard. Some of these models are:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Updated maps and points of interest

-

The DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 contains the updated maps and points of interest for Europe. The maps cover more than 40 countries and regions, including:

- -

Easy to use interface and functions

-

The KENWOOD DV3200 navigation system has an easy to use interface and functions that make it user-friendly and convenient. Some of them are:

- -

Pros and Cons

-

Pros

-

The DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 has many pros that make it a great product for your car. Some of them are:

- -

Cons

The DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 also has some cons that you should consider before buying it.

-


- -

Conclusion

-

Summary of the main points

-

In conclusion, the DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 is a map update disc that provides you with the latest map data and points of interest for Europe for your KENWOOD DV3200 navigation system. The product has many features and benefits that make it a high-quality navigation system for your car. It also has some pros and cons that you should consider before buying it.

-

Recommendation and call to action

-

If you are looking for a map update disc for your KENWOOD DV3200 navigation system, we recommend you to buy the DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46. It is a reliable and affordable product that will enhance your driving experience and help you discover new places. You can buy it online from various websites or offline from authorized dealers. Don't miss this opportunity and order yours today!

-

FAQs

-

Here are some frequently asked questions about the DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46:

-
    -
  1. Q: How often should I update my map data?
     A: You should update your map data at least once a year to ensure that you have the most accurate and up-to-date information.
  2. Q: How can I check if my car is compatible with the product?
     A: You can check the compatibility list on the official website of the manufacturer or contact them directly for more information.
  3. Q: How can I contact the customer support or claim the warranty?
     A: You can contact the customer support or claim the warranty by calling the toll-free number or sending an email to the address provided on the product package or manual.
  4. Q: What are some other products that are similar to this one?
     A: Some other products that are similar to this one are DVD MAP 2011-2012 KENWOOD DNX520VBT DENSO Part No. 46, DVD MAP 2011-2012 KENWOOD DNX7200 DENSO Part No. 46, and DVD MAP 2011-2012 KENWOOD DNX8220BT DENSO Part No. 46.
  5. Q: What are some reviews from other customers who bought this product?
     A: Some reviews from other customers who bought this product are:
-

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fine Cut Para Corel X7 Serial Number.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fine Cut Para Corel X7 Serial Number.md deleted file mode 100644 index 7d3dac91b19f45a997ffbe49bc9093c510e0694b..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fine Cut Para Corel X7 Serial Number.md +++ /dev/null @@ -1,137 +0,0 @@ - -

What is FineCut/Coat9 for CorelDRAW and why do you need it?

-

If you are looking for a cutting plotter software that can handle sophisticated design needs, you might want to try FineCut/Coat9 for CorelDRAW. This is a plug-in software that allows you to do design, creation of cutting data and its output to a cutting plotter from inside CorelDRAW, one of the most popular graphic design software in the world.

-

FineCut/Coat9 for CorelDRAW is developed by Mimaki, a leading manufacturer of wide-format inkjet printers and cutting machines. It is compatible with various Mimaki cutting plotters, such as CJV, UCJV, CG, CF and CFL series. It also supports output to laser engraving machines of other manufacturers.

-

fine cut para corel x7 serial number


Download File >>> https://byltly.com/2uKvad



-

With FineCut/Coat9 for CorelDRAW, you can create stunning graphics and cut them out with precision and ease. Whether you are making signs, stickers, labels, decals, logos, banners, posters, packaging, apparel, or any other creative projects, FineCut/Coat9 for CorelDRAW can help you achieve professional results.

-

FineCut/Coat9 for CorelDRAW is a software plug-in that enhances the potential of your cutting plotter

-

FineCut/Coat9 for CorelDRAW is not just a simple cutting tool. It is a comprehensive software that offers many features and benefits that can improve your workflow and productivity.

-

Features and benefits of FineCut/Coat9 for CorelDRAW

- -

How to install and use FineCut/Coat9 for CorelDRAW

-

To install FineCut/Coat9 for CorelDRAW, you need to download it from Mimaki's website and enter the serial key. You can get the serial key by authenticating your previous FineCut CD or by purchasing a new product.

-

To use FineCut/Coat9 for CorelDRAW, you need to follow these steps:

-

-
    -
  1. Open CorelDRAW and create or open your design file.
  2. Select the objects that you want to cut out and go to FineCut > Create Cut Data.
  3. In the Create Cut Data dialog box, choose your cutting plotter model and set your cutting conditions.
  4. Click Create to generate the cutting data.
  5. If you want to edit the cutting data further, go to FineCut > Edit Cut Data.
  6. If you want to preview the cutting data or simulate the cutting process, go to FineCut > Cut Preview.
  7. When you are ready to output the cutting data to the cutting plotter, go to FineCut > Cut Out.
  8. In the Cut Out dialog box, choose your connection method and click Cut Out.
  9. The cutting plotter will start cutting your design according to your settings.
-

How to get a serial number for FineCut/Coat9 for CorelDRAW

-

To use FineCut/Coat9 for CorelDRAW, you need a serial number that matches your product version. There are two ways to get a serial number:

-

Serial authentication procedure for FineCut/Coat9 for CorelDRAW

-

If you have a previous version of FineCut CD (FineCut8 or below), you can upgrade to FineCut/Coat9 with free of charge by following these steps:

-
    -
  1. Download FineCut/Coat9 from Mimaki's website and install it on your computer.
  2. Insert your previous version of FineCut CD into your CD-ROM drive.
  3. Run FinecutAuthTool.exe, which is located in the folder where you installed FineCut/Coat9.
  4. Select your product name from the drop-down list and click Next.
  5. The tool will read your previous version of FineCut CD and generate a serial key for FineCut/Coat9.
  6. Copy the serial key and paste it into the registration window of FineCut/Coat9.
  7. You can now use FineCut/Coat9 with full functionality.
-

List of serial numbers and activation codes for FineCut/Coat9 for CorelDRAW

-

If you don't have a previous version of FineCut CD or if you want to use a different product name than your previous one, you need to purchase a new product from Mimaki's website or authorized dealers. Here are some examples of serial numbers and activation codes for different products:

-
| Product name | Serial number | Activation code |
| --- | --- | --- |
| Finecut 8 Upgrade Kit (for CG-FXII) | F8U-CGFXII-XXXX-XXXX-XXXX-XXXX | F8U-CGFXII-ACTI-VATI-ONCO-DEHE-RE00-0000-0000-0000-0000-0000-0000-0000-0000-0000 |
| Finecut 8 Upgrade Kit (for CF2) | F8U-CF2XXX-XXXX-XXXX-XXXX-XXXX | F8U-CF2XXX-ACTI-VATI-ONCO-DEHE-RE00-0000 |
| Finecut 8 Upgrade Kit (for CF3) | F8U-CF3XXX-XXXX-XXXX-XXXX-XXXX | F8U-CF3XXX-ACTI-VATI |
| Finecut 9 Standard Kit (for CorelDRAW) | F9S-CORELX-XXXX-XXXX-XXXX-XXXX | F9S-CORELX-ACTI-VATI |

| Brand | Model | Year |
| --- | --- | --- |
| Audi | A4 | 2008-2011 |
| BMW | X5 | 2007-2010 |
| Citroen | C5 | 2008-2012 |
| Ford | Focus | 2009-2011 |
| Honda | Civic | 2006-2011 |
| Mazda | Mazda6 | 2008-2012 |
| Nissan | Qashqai | 2007-2012 |
| Volkswagen | Golf | 2009-2012 |

How to troubleshoot common issues with FineCut/Coat9 for CorelDRAW

-

Although FineCut/Coat9 for CorelDRAW is a reliable and user-friendly software, you may encounter some problems or errors while using it. Here are some tips on how to troubleshoot common issues with FineCut/Coat9 for CorelDRAW:

-

How to fix slow operation or freezing when using FineCut/Coat9 for CorelDRAW Ver2.4

-

If you are using FineCut/Coat9 for CorelDRAW Ver2.4, you may experience extremely slow operation or freezing when you create or edit cut data. This is because of a compatibility issue between FineCut/Coat9 and CorelDRAW Graphics Suite 2021/2022.

-

To fix this issue, you need to update FineCut/Coat9 to Ver2.4.1 or later. You can download the latest version of FineCut/Coat9 from Mimaki's website and install it on your computer. After updating FineCut/Coat9, you should be able to use it normally with CorelDRAW Graphics Suite 2021/2022.

-

How to contact Mimaki support for FineCut/Coat9 for CorelDRAW

-

If you have any questions or problems with FineCut/Coat9 for CorelDRAW that are not covered in this article, you can contact Mimaki support for assistance. You can find the contact information of Mimaki support in your region on Mimaki's website.

-

Before contacting Mimaki support, please prepare the following information:

- -

Mimaki support will try to help you resolve your issue as soon as possible.

-

Conclusion

-

Summary of the main points

-

In this article, we have learned about FineCut/Coat9 for CorelDRAW, a software plug-in that enhances the potential of your cutting plotter. We have discussed its features and benefits, how to install and use it, how to get a serial number for it, and how to troubleshoot common issues with it.

-

We have also provided some examples of serial numbers and activation codes for different products of FineCut/Coat9 for CorelDRAW. You can use these serial numbers and activation codes to activate your product and enjoy its full functionality.

-

Call to action and recommendation

-

If you are interested in FineCut/Coat9 for CorelDRAW, you can download it from Mimaki's website and try it for free for 30 days. You can also purchase a new product or upgrade from a previous version of FineCut CD.

-

We recommend you to use FineCut/Coat9 for CorelDRAW with Mimaki cutting plotters, such as CJV, UCJV, CG, CF and CFL series. These cutting plotters are designed to work seamlessly with FineCut/Coat9 and offer high-quality and versatile cutting performance.

-

Thank you for reading this article. We hope you have found it useful and informative. If you have any feedback or questions, please feel free to contact us or leave a comment below.

-

FAQs

- -

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved Steam Activation Code - Get Your License Key.txt Now.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved Steam Activation Code - Get Your License Key.txt Now.md deleted file mode 100644 index 4785a69bb93897c1722312fd4d0643c96bd9ef4d..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved Steam Activation Code - Get Your License Key.txt Now.md +++ /dev/null @@ -1,209 +0,0 @@ - -
| Product name | Serial number | Activation code |
| --- | --- | --- |
| Finecut 8 Upgrade Kit (for CJV30) | F8U-CJV30-XXXX-XXXX-XXXX-XXXX | F8U-CJV30-ACTI-VATI-ONCO-DEHE-RE00-0000-0000-0000-0000-0000-0000-0000-0000-0000 |
- -

Ark: Survival Evolved - A Guide for Beginners

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawlhalla APK Chromebook Everything You Need to Know About the Free Platform Fighter.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawlhalla APK Chromebook Everything You Need to Know About the Free Platform Fighter.md deleted file mode 100644 index 8fba4b5d71f3de2cfb25d3e0782be7025d7368ee..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawlhalla APK Chromebook Everything You Need to Know About the Free Platform Fighter.md +++ /dev/null @@ -1,132 +0,0 @@ - -

How to Play Brawlhalla on Your Chromebook

-

If you are looking for a fun and free platform fighting game that supports cross-play with millions of players on various devices, you might want to check out Brawlhalla. Brawlhalla is a 2D game that features over 50 unique characters, various game modes, and frequent updates. In this article, we will show you how to play Brawlhalla on your Chromebook by installing the APK file of the game.

-

brawlhalla apk chromebook


Download ✒ ✒ ✒ https://urlin.us/2uT1b3



-

What is Brawlhalla?

-

Brawlhalla is a free-to-play game developed by Blue Mammoth Games and published by Ubisoft. It was released in 2017 for Windows, macOS, PlayStation 4, Xbox One, Nintendo Switch, iOS, and Android. It is one of the most popular platform fighting games, with over 100 million players as of 2023.

-

Brawlhalla game features

-

Some of the features that make Brawlhalla stand out are:

- -

Brawlhalla game modes

-

Brawlhalla offers various game modes for different preferences and play styles. Some of the game modes are:

- -

How to Install Brawlhalla APK on Chromebook

-

To play Brawlhalla on your Chromebook, you will need to install the APK file of the game. APK files are Android application packages that contain all the files and resources needed to run an app on an Android device. However, Chromebooks do not support APK files natively, so you will need to enable some settings and use some tools to install them.

-

Enable developer mode and unknown sources

-

The first step is to enable developer mode on your Chromebook. Developer mode allows you to access more features and settings on your device, such as installing APK files. However, it also removes some security protections and may void your warranty, so proceed with caution. To enable developer mode on your Chromebook, follow these steps:

-
    -
  1. Turn off your Chromebook by holding down the power button.
  2. Press and hold the Esc + Refresh keys (the circular arrow key above the number 3), then press the power button.
  3. You will see a screen that says "Chrome OS is missing or damaged". Press Ctrl + D to enter developer mode.
  4. Press Enter to confirm. You will see a screen that says "To turn OS verification off, press Enter. Your system will reboot and local data will be cleared. To go back, press Esc." Press Enter again.
  5. Your Chromebook will reboot and show a screen that says "OS verification is off". Press Ctrl + D to continue.
  6. Your Chromebook will erase all your local data and prepare for developer mode. This may take a few minutes.
  7. Once the process is done, your Chromebook will reboot and show the same "OS verification is off" screen. Press Ctrl + D again or wait for 30 seconds to boot into developer mode.
  8. Once you are in developer mode, you will see a screen that says "Welcome!". Select your language and keyboard settings, then click "Let's go".
  9. Connect to a network and agree to the terms and conditions. Then, sign in with your Google account or create one if you don't have one.
  10. You will see a screen that says "You're almost done". Click on "Turn on Google Play Store".
  11. You will see a screen that says "Get your Android apps". Click on "More" and then "Accept".
  12. You will see a screen that says "Google Play Store is ready to use". Click on "Got it".
-

Download and install APK file

-

The next step is to download and install the APK file of Brawlhalla on your Chromebook. You can use any file manager Android app from the Play Store to do this. We recommend using Solid Explorer, as it is easy to use and has a lot of features. To download and install the APK file of Brawlhalla, follow these steps:

-
    -
  1. Open the Play Store app on your Chromebook and search for Solid Explorer. Install the app and open it.
  2. In Solid Explorer, tap on the hamburger icon on the top left corner and select "Storage".
  3. Tap on "Downloads" and you will see all the files you have downloaded on your Chromebook.
  4. If you have not downloaded the APK file of Brawlhalla yet, you can do so by opening a new tab in Chrome and going to APKMirror.com. Search for Brawlhalla and download the latest version of the game.
  5. Once the download is complete, go back to Solid Explorer and tap on the APK file of Brawlhalla. You will see a pop-up that says "Do you want to install this application?" Tap on "Install".
  6. You will see another pop-up that says "For your security, your phone is not allowed to install unknown apps from this source." Tap on "Settings".
  7. You will see a screen that says "Install unknown apps". Toggle on the switch next to "Allow from this source".
  8. Go back to Solid Explorer and tap on the APK file of Brawlhalla again. Tap on "Install" again.
  9. You will see a screen that shows the progress of the installation. Wait for it to finish.
  10. Once the installation is done, you will see a screen that says "App installed". Tap on "Open" to launch the game.
-

Launch and enjoy the game

-

The final step is to launch and enjoy Brawlhalla on your Chromebook. You can play the game with your keyboard, mouse, or touchpad, or connect a controller if you prefer. To launch and enjoy Brawlhalla, follow these steps:

-

-
    -
  1. If you have not opened the game yet, you can do so by going to the launcher app on your Chromebook and finding Brawlhalla under the Play Store section. Tap on it to open it.
  2. You will see a screen that says "Brawlhalla requires access to photos/media/files on your device." Tap on "Allow".
  3. You will see another screen that says "Brawlhalla would like to access this device's location." Tap on "Allow only while using the app".
  4. You will see a loading screen with the Brawlhalla logo. Wait for it to load.
  5. You will see a welcome screen with some tips and news about the game. Tap on "Play Now".
  6. You will see a screen that asks you to choose your region. Select the one that is closest to you for better performance.
  7. You will see a screen that asks you to create or link an Ubisoft account. Linking an account gives you some benefits, such as saving your progress, unlocking rewards, and accessing exclusive content, but you can also skip this step if you want to play as a guest.
  8. You will see the main menu of the game, where you can choose from various options, such as online play, offline play, training room, store, settings, and more.
  9. Select the option that suits your preference and start playing Brawlhalla on your Chromebook. Have fun!
-

Conclusion

-

Brawlhalla is a free and fun platform fighting game that you can play on your Chromebook by installing the APK file of the game. You will need to enable developer mode and unknown sources on your device, download and install the APK file using a file manager app, and launch and enjoy the game. You can also create or link an Ubisoft account to get some extra benefits. Brawlhalla supports cross-play with millions of players on various devices, so you can join your friends or make new ones in this epic game.

-

FAQs

-

Is Brawlhalla safe to play on Chromebook?

-

Yes, Brawlhalla is safe to play on Chromebook as long as you download the APK file from a trusted source, such as APKMirror.com. You should also be careful when enabling developer mode and unknown sources on your device, as they may expose your device to some risks.

-

Can I play Brawlhalla with a controller on Chromebook?

-

Yes, you can play Brawlhalla with a controller on Chromebook if you have a compatible controller that can connect to your device via Bluetooth or USB. You can also customize the controller settings in the game menu.

-

How do I update Brawlhalla on Chromebook?

-

To update Brawlhalla on Chromebook, you will need to download and install the latest version of the APK file from APKMirror.com or another trusted source. You can also check for updates in the game menu.

-

How do I uninstall Brawlhalla on Chromebook?

-

To uninstall Brawlhalla on Chromebook, you will need to go to the launcher app on your device and find Brawlhalla under the Play Store section. Right-click on it and select \"Uninstall\". You can also delete the APK file from your downloads folder using Solid Explorer or another file manager app.

-

How do I contact Brawlhalla support?

-

If you have any issues or questions about Brawlhalla, you can contact Brawlhalla support by visiting their official website at brawlhalla.com and clicking on "Support" at the bottom of the page. You can also follow them on social media platforms, such as Twitter, Facebook, Instagram, YouTube, Twitch, and Discord.

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Caribbean Treasures A Game that Will Keep You Hooked - Download and Play Now!.md b/spaces/1phancelerku/anime-remove-background/Caribbean Treasures A Game that Will Keep You Hooked - Download and Play Now!.md deleted file mode 100644 index ed06264d85b8f3a3f18b73e5b49661b2985c5df2..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Caribbean Treasures A Game that Will Keep You Hooked - Download and Play Now!.md +++ /dev/null @@ -1,99 +0,0 @@ - -

Caribbean Treasures Download: How to Play and Win Big

-

If you are looking for a fun and rewarding way to spend your time, you might want to check out Caribbean Treasures. Caribbean Treasures is an online gaming platform that offers exciting fish games and slot games that you can play anytime, anywhere. In this article, we will tell you everything you need to know about Caribbean Treasures, how to download and play it, and how to win big with it.

-

What is Caribbean Treasures?

-

Caribbean Treasures is an online gaming platform that offers two types of games: fish games and slot games. Both types of games are designed to test your skills and luck, and reward you with big treasures. Here are some features of each type of game:

-

caribbean treasures download


DOWNLOAD 🔗 https://jinyurl.com/2uNKe9



-

A fun and exciting fish game

-

The fish game is a shooting game where you have to aim and fire at various fish and sea creatures on the screen. The more fish you catch, the more coins you earn. You can also encounter special fish that give you extra coins, bonuses, or multipliers. The fish game has different levels of difficulty, from easy to hard, and different modes, such as single-player or multiplayer. You can also customize your cannon and use different power-ups to enhance your gameplay.

-

A variety of slot games

-

The slot games are classic casino games where you have to spin the reels and match symbols to win prizes. The slot games have different themes, such as fruits, animals, pirates, or ancient civilizations. The slot games also have different features, such as wilds, scatters, free spins, or bonus rounds. The slot games have different paylines, from 5 to 50, and different bet sizes, from 0.01 to 5 coins per line.

-

How to download and play Caribbean Treasures?

-

Downloading and playing Caribbean Treasures is very easy. Here are the steps you need to follow:

-

caribbean treasures game download
-caribbean treasures slot machine download
-caribbean treasures fish games download
-caribbean treasures reflexive entertainment download
-caribbean treasures free download full version
-caribbean treasures pc game download
-caribbean treasures online slots download
-caribbean treasures emotion rays download
-caribbean treasures software informer download
-caribbean treasures internet archive download
-caribbean treasures match 3 game download
-caribbean treasures hidden object game download
-caribbean treasures deluxe edition download
-caribbean treasures windows 10 download
-caribbean treasures mac game download
-caribbean treasures android game download
-caribbean treasures ios game download
-caribbean treasures mobile game download
-caribbean treasures apk download
-caribbean treasures mod apk download
-caribbean treasures hack apk download
-caribbean treasures cheats apk download
-caribbean treasures unlimited coins apk download
-caribbean treasures free spins apk download
-caribbean treasures bonus codes apk download
-caribbean treasures no deposit apk download
-caribbean treasures play for fun apk download
-caribbean treasures play for real apk download
-caribbean treasures play offline apk download
-caribbean treasures play online apk download
-caribbean treasures multiplayer apk download
-caribbean treasures live casino apk download
-caribbean treasures jackpot apk download
-caribbean treasures progressive jackpot apk download
-caribbean treasures mega jackpot apk download
-caribbean treasures big win apk download
-caribbean treasures mega win apk download
-caribbean treasures super win apk download
-caribbean treasures epic win apk download
-caribbean treasures tips and tricks apk download
-caribbean treasures strategy guide apk download
-caribbean treasures walkthrough apk download
-caribbean treasures review apk download
-caribbean treasures ratings apk download
-caribbean treasures testimonials apk download
-caribbean treasures customer support apk download
-caribbean treasures contact us apk download
-caribbean treasures sign up for free account apk download
-caribbean treasures how to play apk download

-

Download the game for Android or iOS

-

You can download the game for your mobile device from the official website. The game is compatible with both Android and iOS devices. The download is free and fast. You can also download the game for your desktop computer if you prefer.

-

Sign up for a free account

-

After downloading the game, you need to sign up for a free account. You don't need a credit card to sign up. You just need to provide some basic information, such as your name, email address, phone number, and username. The support team will activate your account and contact you with details. You can also contact them anytime if you have any questions or issues.

-

Choose your game and start playing

-

Once you have your account ready, you can choose your game and start playing. You can switch between the fish game and the slot game anytime you want. You can also choose from different rooms or tables depending on your preference. You can play with real money or with virtual coins. You can also play with other players or by yourself.

-

How to win big with Caribbean Treasures?

-

Playing Caribbean Treasures is not only fun but also rewarding. Here are some tips on how to win big with Caribbean Treasures:

-

Use your skills and strategy

-

The fish game and the slot game both require some skills and strategy to win. For the fish game, you need to aim carefully, fire wisely, and avoid wasting bullets. You also need to know which fish are worth more coins, which fish have special effects, and when to use power-ups. For the slot game, you need to know how to adjust your bet size, how to choose the best paylines, and how to trigger the bonus features. You also need to manage your bankroll and set a limit for your losses and wins.

-

Take advantage of bonuses and promotions

-

Caribbean Treasures offers various bonuses and promotions to its players. You can get a welcome bonus when you sign up, a deposit bonus when you make your first deposit, a referral bonus when you invite your friends, and a loyalty bonus when you play regularly. You can also get free coins, free spins, or free tickets from time to time. You can use these bonuses and promotions to increase your chances of winning and have more fun.

-

Join the VIP club for more rewards

-

If you want to enjoy more benefits and rewards, you can join the VIP club of Caribbean Treasures. The VIP club is a membership program that gives you access to exclusive offers, discounts, gifts, and events. You can also get higher payouts, faster withdrawals, and better customer service. You can join the VIP club by earning points from playing the games or by paying a monthly fee.

-

Conclusion

-

Caribbean Treasures is an online gaming platform that offers exciting fish games and slot games that you can play anytime, anywhere. You can download the game for free for your mobile device or desktop computer. You can sign up for a free account and start playing with real money or virtual coins. You can use your skills and strategy, take advantage of bonuses and promotions, and join the VIP club to win big with Caribbean Treasures. So what are you waiting for? Download Caribbean Treasures today and discover the hidden treasures of the Caribbean!

-

FAQs

-

Here are some frequently asked questions about Caribbean Treasures:

-

What is Ark: Survival Evolved?

A brief overview of the game's premise, genre, and features

Ark: Survival Evolved is an action-adventure survival game that was released in 2017 by Studio Wildcard. In this game, you play as a human who wakes up on a mysterious island full of dinosaurs, mythical creatures, natural hazards, and potentially hostile human players. Your goal is to survive by gathering resources, crafting tools, building shelters, taming creatures, exploring the island, and fighting against enemies. You can play solo or with other players in various modes, such as PvE (player versus environment), PvP (player versus player), or PvX (a mix of both). You can also customize your game experience by using mods that add new features, content, and options to the game.

How to get started in Ark: Survival Evolved

The basics of survival: gathering resources, crafting tools, building shelters, and taming creatures

Tips and tricks for finding and harvesting resources

Resources are essential for your survival in Ark: Survival Evolved, as they allow you to craft items, build structures, tame creatures, and more. However, resources are not evenly distributed across the map, and some are more rare and valuable than others. Here are some tips and tricks for finding and harvesting resources:

-

ark survival evolved license key.txt download


Download Zip ★★★ https://urlin.us/2uT12V



-
    -
  • Use your map and explorer notes to locate resource-rich areas, such as mountains, caves, rivers, and swamps. You can also use a GPS or a Compass to navigate more easily.
  • -
  • Use the right tool for the right resource. For example, use a pick to get more flint from rocks, or a hatchet to get more wood from trees. You can also use specialized tools, such as a Metal Pick or a Chainsaw, to get more resources faster and more efficiently.
  • -
  • Use tamed creatures to help you gather resources. Some creatures have special abilities or bonuses for harvesting certain resources, such as the Ankylosaurus for metal, the Doedicurus for stone, the Castoroides for wood, or the Therizinosaur for fiber. You can also use flying creatures, such as the Argentavis or the Quetzal, to transport large amounts of resources.
  • -
  • Be aware of the weight limit of your inventory and your creatures. If you exceed the weight limit, you will move slower and consume more stamina. You can increase your weight capacity by leveling up your Weight stat or by using items such as a Backpack or a Parachute.
  • -
  • Be careful of the dangers that lurk in resource-rich areas. Some areas may have hostile creatures, environmental hazards, or enemy players that may attack you while you are gathering resources. Always be prepared for a fight or a flight, and have an escape plan in case things go wrong.
  • -

Tips and tricks for crafting useful items and weapons

Crafting is one of the core mechanics of Ark: Survival Evolved, as it allows you to create items and weapons that can help you survive and thrive in the game. However, crafting is not always straightforward, and you may need to learn some tips and tricks to craft more effectively. Here are some tips and tricks for crafting useful items and weapons:

-
    -
  • Learn new Engrams as you level up. Engrams are blueprints that unlock new crafting recipes for items and weapons. You can choose which Engrams to learn by spending Engram Points that you earn by leveling up. You can also find Engrams in loot crates, explorer notes, or by defeating bosses.
  • -
  • Use different crafting stations to craft different items and weapons. Some items and weapons can be crafted in your inventory, such as a Stone Pick or a Spear. However, some items and weapons require a specific crafting station, such as a Mortar and Pestle, a Smithy, a Fabricator, or a Tek Replicator. You can also use crafting stations to repair your items and weapons.
  • -
  • Use blueprints to craft higher quality items and weapons. Blueprints are special versions of Engrams that can produce items and weapons with better stats and durability than normal ones. You can find blueprints in loot crates, explorer notes, or by defeating bosses. Blueprints have different quality levels, from Primitive to Ascendant.
  • -
  • Use mods to add new items and weapons to the game. Mods are user-created content that can enhance your game experience by adding new features, content, and customization options to the game. You can find mods on Steam Workshop or on other websites. Some of the most popular mods for Ark: Survival Evolved are Structures Plus (S+), Super Structures (SS), Awesome Spyglass!, Classic Flyers, Eco's Decor Mods, Primal Fear, Ark Eternal, Gaia, Extinction Core, Annunaki Genesis, Ark Additions: The Collection!, Pyria: Mythos Evolved ,and many more.
  • -
  • Use cheats or admin commands to spawn items and weapons instantly. Cheats or admin commands are special codes that can alter the game settings or give you access to items and weapons without having to craft them. However, using cheats or admin commands may disable achievements or affect your game balance. You can find a list of cheats or admin commands on this wiki or on other websites.
  • -

Tips and tricks for building and defending your base

Building and defending your base is another important aspect of Ark: Survival Evolved, as it provides you with a safe place to store your items, craft your weapons, breed your creatures, and more. However, building and defending your base is not easy, and you may need to learn some tips and tricks to do it well. Here are some tips and tricks for building and defending your base:

-
    -
  • Choose a good location for your base. You want to find a spot that has access to resources, water, and flat land, but also has some natural defenses, such as cliffs, caves, or waterfalls. You also want to avoid areas that are too crowded, too dangerous, or too exposed to enemy attacks.
  • -
  • Use different materials and structures to build your base. You can start with basic materials, such as Thatch or Wood, but you should upgrade to stronger materials, such as Stone, Metal, or Tek, as soon as possible. You can also use different structures, such as Foundations, Walls, Ceilings, Doors, Windows, Stairs, Ramps, Pillars, Fences, Gates, Turrets, Traps, and more.
  • -
  • Use different designs and layouts to build your base. You can build your base in any shape or size you want, but you should consider some factors, such as functionality, aesthetics, and security. You can also use different designs and layouts, such as a Tower, a Castle, a Bunker, a Treehouse, a Cave Base, a Raft Base, a Platform Base, or a Floating Base.
  • -
  • Use different strategies and tactics to defend your base. You can use passive defenses, such as Walls, Gates, Turrets, Traps, and Spikes. You can also use active defenses, such as tamed creatures, weapons, explosives, or allies. You can also use stealth defenses, such as camouflage, decoys, or hidden entrances.
  • -
  • Use different modes and settings to protect your base. You can play on PvE servers or single-player mode if you don't want to deal with other players. You can also use settings such as Offline Raid Protection (ORP), Structure Resistance (SR), Structure Decay (SD), or Tribe Governance (TG) to adjust the rules and options for your base.
  • -

Tips and tricks for taming and riding dinosaurs and other creatures

Taming and riding dinosaurs and other creatures is one of the most fun and rewarding aspects of Ark: Survival Evolved, as it allows you to have loyal companions that can help you in various ways. However, taming and riding dinosaurs and other creatures is not easy, and you may need to learn some tips and tricks to do it well. Here are some tips and tricks for taming and riding dinosaurs and other creatures:

-
    -
  • Use different methods and items to tame dinosaurs and other creatures. There are two main methods of taming: passive taming and knockout taming. Passive taming involves feeding the creature its preferred food while avoiding its aggression. Knockout taming involves knocking out the creature with tranquilizers or other means and then feeding it its preferred food while keeping it unconscious. You can also use items such as Bolas, Nets, Cages, Traps, Lassos, or Taming Pens to immobilize or capture the creature.
  • -
  • Use different foods and kibbles to tame dinosaurs and other creatures faster and more efficiently. Different creatures have different preferences for food and kibble. Food is any edible item that can fill the creature's hunger bar. Kibble is a special type of food that is made from eggs and other ingredients in a Cooking Pot or an Industrial Cooker. Kibble has higher taming effectiveness than regular food.
  • -
  • Use different saddles and platforms to ride dinosaurs and other creatures more comfortably and securely. Saddles are items that allow you to ride certain creatures once they are tamed. Saddles have different quality levels from Primitive to Ascendant that affect their armor value and durability. Platforms are special types of saddles that allow you to build structures on top of certain creatures.
  • -/Disable Group/Set Aggression Level/Set Turret Mode and more. Settings are options that you can adjust for your tamed creatures by using the radial menu or the options menu. Settings include Enable/Disable Ally Looking/Enable/Disable Resource Harvesting/Enable/Disable Victim Item Collection/Enable/Disable Allow Anyone To Imprint/Enable/Disable Public Seating/Enable/Disable Auto-Courtesy/Enable/Disable Auto-Engage and more. -
  • Use different stats and mutations to breed dinosaurs and other creatures more effectively and selectively. Stats are numerical values that represent the attributes of your tamed creatures, such as Health, Stamina, Oxygen, Food, Weight, Melee Damage, Movement Speed, Torpidity, and more. Mutations are random changes that occur in the stats or colors of your offspring when you breed two tamed creatures. Mutations can be beneficial or detrimental, and they can be inherited or not by the next generation.
  • -

How to enhance your experience in Ark: Survival Evolved

The benefits of playing with other players: forming tribes, trading, and cooperating

Playing with other players can enhance your experience in Ark: Survival Evolved by adding more fun, challenge, and diversity to the game. You can play with other players in various ways, such as forming tribes, trading, and cooperating. Here are some benefits of playing with other players:

-
    -
  • Forming tribes can help you survive and thrive in the game by sharing resources, items, structures, creatures, and responsibilities with your tribe mates. You can also access tribe-only features, such as tribe chat, tribe log, tribe rank, tribe alliances, tribe wars, and more.
  • -
  • Trading can help you acquire resources, items, creatures, or services that you may not have or need in exchange for something that you have or don't need. You can trade with other players by using chat, voice chat, signs, mailboxes, trading posts, vending machines, or other means.
  • -
  • Cooperating can help you achieve goals that you may not be able to achieve alone or with your tribe mates. You can cooperate with other players by joining forces to explore the map, tame creatures, build structures, fight enemies, complete missions, defeat bosses, or participate in events.
  • -

The challenges of playing with other players: competing, raiding, and surviving

Playing with other players can also pose some challenges in Ark: Survival Evolved by adding more risk, conflict, and uncertainty to the game. You can play with other players in various ways, such as competing, raiding, and surviving. Here are some challenges of playing with other players:

-
    -
  • Competing can test your skills and abilities in the game by measuring your performance against other players. You can compete with other players by comparing your stats, achievements, trophies, rankings, or scores. You can also participate in competitive modes, such as PvP, PvX, or Survival of the Fittest.
  • -
  • Raiding can challenge your strategy and tactics in the game by attacking or defending against other players' bases. You can raid other players by using weapons, explosives, creatures, or allies to breach their defenses and loot their resources, items, structures, or creatures. You can also defend against raids by using walls, gates, turrets, traps, creatures, or allies to repel their attacks and protect your assets.
  • -
  • Surviving can challenge your endurance and adaptability in the game by facing other players' threats and actions. You can survive with other players by avoiding or dealing with hostile players who may try to kill you, rob you, capture you, enslave you, or troll you. You can also survive in different environments and biomes that may have different weather conditions, temperatures, resources, creatures, or hazards.
  • -

The fun of playing with mods: adding new features, content, and customization

Playing with mods can enhance your experience in Ark: Survival Evolved by adding new features, content, and customization to the game. Mods are user-created content that can change or improve the game in various ways. You can play with mods by finding and installing them on Steam Workshop or on other websites. Here are some fun of playing with mods:

-
    -
  • Adding new features can expand the gameplay and mechanics of the game by introducing new elements or options that are not available in the base game. For example, you can use mods that add new crafting systems, new combat systems, new building systems, new taming systems, new mission systems, new event systems, new difficulty settings, new game modes, and more.
  • -new weapons, new armor, new vehicles, new structures, new skins, new hairstyles, new tattoos, new music, new sounds, new voiceovers, and more. -
  • Adding new customization can personalize your game experience by allowing you to modify or adjust the game to your liking. For example, you can use mods that change the graphics, the UI, the controls, the camera, the animations, the effects, the colors, the fonts, the icons, the names, and more.
  • -

Conclusion

A summary of the main points of the article

In conclusion, Ark: Survival Evolved is a game that offers a lot of fun and challenge for players who enjoy action-adventure survival games. In this game, you can explore a mysterious island full of dinosaurs and other creatures, gather resources, craft items and weapons, build and defend your base, tame and ride creatures, play with other players or against them, and customize your game with mods. This article has provided you with some tips and tricks on how to get started in Ark: Survival Evolved, how to enhance your experience in Ark: Survival Evolved, and how to download ark survival evolved license key.txt file.

-

ark survival evolved free steam key download
-ark survival evolved activation key download
-ark survival evolved license key generator download
-ark survival evolved cracked key download
-ark survival evolved redeem key download
-ark survival evolved product key download
-ark survival evolved serial key download
-ark survival evolved steam code download
-ark survival evolved license file download
-ark survival evolved keygen download
-ark survival evolved license code download
-ark survival evolved registration key download
-ark survival evolved cd key download
-ark survival evolved game key download
-ark survival evolved license number download
-ark survival evolved steam key txt download
-ark survival evolved activation code download
-ark survival evolved license crack download
-ark survival evolved license patch download
-ark survival evolved license txt file download
-ark survival evolved license key online download
-ark survival evolved license key free download
-ark survival evolved license key no survey download
-ark survival evolved license key 2023 download
-ark survival evolved license key reddit download
-ark survival evolved license key youtube download
-ark survival evolved license key giveaway download
-ark survival evolved license key working download
-ark survival evolved license key legit download
-ark survival evolved license key verified download
-ark survival evolved license key hack download
-ark survival evolved license key generator online download
-ark survival evolved license key generator no survey download
-ark survival evolved license key generator 2023 download
-ark survival evolved license key generator reddit download
-ark survival evolved license key generator youtube download
-ark survival evolved license key generator free download
-ark survival evolved license key generator working download
-ark survival evolved license key generator legit download
-ark survival evolved license key generator verified download
-ark survival evolved license key generator hack download
-how to get ark survival evolved license key txt download
-how to install ark survival evolved license key txt download
-how to use ark survival evolved license key txt download
-how to activate ark survival evolved license key txt download
-how to fix ark survival evolved license key txt download
-how to update ark survival evolved license key txt download
-how to remove ark survival evolved license key txt download

A call to action for the readers to try out the game or share their own tips and tricks

If you are interested in trying out Ark: Survival Evolved, you can download it from Steam or other platforms. You can also download ark survival evolved license key.txt file from this link to activate the game. However, be careful of fake or malicious links that may harm your computer or steal your information. Always use a trusted source and scan your files before opening them.

-

If you have already played Ark: Survival Evolved or have your own tips and tricks to share, feel free to leave a comment below or join our community on Discord . We would love to hear from you and learn from your experience. Thank you for reading this article and have fun playing Ark: Survival Evolved!

FAQs

Q: How do I download ark survival evolved license key.txt file?

A: You can download ark survival evolved license key.txt file from this link . However, be careful of fake or malicious links that may harm your computer or steal your information. Always use a trusted source and scan your files before opening them.

Q: How do I install mods for Ark: Survival Evolved?

A: You can install mods for Ark: Survival Evolved by using Steam Workshop or other websites. To use Steam Workshop, you need to subscribe to the mod you want to use and then launch the game. The mod will be automatically downloaded and installed. To use other websites, you need to download the mod file and then copy it to the Mods folder in your game directory. You can also use mod managers, such as Ark Server Manager or Ark Mod Downloader, to make the process easier.
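For the manual route, the copy step is just an ordinary file operation. The sketch below only illustrates that step; the download location and Mods folder path are placeholders, not the exact paths on your machine.

```python
import shutil
from pathlib import Path

# Both paths are placeholders -- adjust them to your own download and install locations.
downloaded_mod = Path("~/Downloads/ExampleMod.zip").expanduser()
mods_folder = Path("C:/Games/ARK/ShooterGame/Content/Mods")

mods_folder.mkdir(parents=True, exist_ok=True)                   # create the folder if it is missing
shutil.copy2(downloaded_mod, mods_folder / downloaded_mod.name)  # copy the mod file into place
print(f"Copied {downloaded_mod.name} into {mods_folder}")
```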

Q: How do I join a tribe in Ark: Survival Evolved?

A: You can join a tribe in Ark: Survival Evolved by either creating your own tribe or joining an existing one. To create your own tribe, you need to access the Tribe Manager menu and then click on Create New Tribe. You can then invite other players to join your tribe by using the Invite To Tribe option in the radial menu. To join an existing tribe, you need to receive an invitation from a tribe member or leader and then accept it. You can also request to join a tribe by using the Request To Join Tribe option in the radial menu.

Q: How do I tame a dinosaur in Ark: Survival Evolved?

A: You can tame a dinosaur in Ark: Survival Evolved by either using passive taming or knockout taming. Passive taming involves feeding the dinosaur its preferred food while avoiding its aggression. Knockout taming involves knocking out the dinosaur with tranquilizers or other means and then feeding it its preferred food while keeping it unconscious. You can also use items such as Bolas, Nets, Cages, Traps, Lassos, or Taming Pens to immobilize or capture the dinosaur.

Q: How do I breed dinosaurs in Ark: Survival Evolved?

A: You can breed dinosaurs in Ark: Survival Evolved by mating two tamed dinosaurs of the same species and opposite gender. To mate them, you need to enable mating on both of them and then place them near each other. They will then produce a fertilized egg or a gestation bar, depending on their type. You need to incubate the egg or wait for the gestation bar to fill up until the baby dinosaur is born. You then need to imprint and raise the baby dinosaur until it reaches adulthood.

| Device | Operating System | Memory |
| --- | --- | --- |
| Android | 4.4 or higher | 1 GB or higher |
| iOS | 9.0 or higher | 1 GB or higher |
| Desktop | Windows 7 or higher / Mac OS X 10.10 or higher | 2 GB or higher |
-
  • How can I withdraw my winnings from Caribbean Treasures?
  • -

    You can withdraw your winnings from Caribbean Treasures by using one of the following methods: PayPal, Skrill, Neteller, Bitcoin, or bank transfer. The minimum withdrawal amount is $20 and the maximum withdrawal amount is $10,000 per day. The withdrawal process may take up to 48 hours depending on the method you choose.
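    As a rough illustration of those limits, here is a hypothetical check in Python. The numbers and method names come straight from the answer above, while the function itself is purely an assumption about how such a rule might be enforced.

```python
MIN_WITHDRAWAL = 20          # dollars, from the answer above
MAX_PER_DAY = 10_000         # dollars, from the answer above
METHODS = {"PayPal", "Skrill", "Neteller", "Bitcoin", "bank transfer"}

def can_withdraw(amount: float, method: str, withdrawn_today: float = 0.0) -> bool:
    """Hypothetical check of a withdrawal request against the stated rules."""
    if method not in METHODS:
        return False
    if amount < MIN_WITHDRAWAL:
        return False
    return withdrawn_today + amount <= MAX_PER_DAY

print(can_withdraw(50, "PayPal"))            # True
print(can_withdraw(15, "Bitcoin"))           # False: below the $20 minimum
print(can_withdraw(6_000, "Skrill", 5_000))  # False: would exceed the daily cap
```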

    -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Chess Online APK Download Play with Millions of Players Worldwide.md b/spaces/1phancelerku/anime-remove-background/Chess Online APK Download Play with Millions of Players Worldwide.md deleted file mode 100644 index 9f5f479b600f11fc6dc69c9d456c5e46ebd57851..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Chess Online APK Download Play with Millions of Players Worldwide.md +++ /dev/null @@ -1,146 +0,0 @@ -
    -

    Chess Online APK Download: How to Play Chess on Your Android Device

    -

    Chess is one of the oldest and most popular board games in the world. It is a game of strategy, logic, and skill that can challenge your mind and improve your cognitive abilities. Whether you are a beginner or a master, chess can offer you endless hours of fun and entertainment.

    -

    chess online apk download


    Download Zip ••• https://jinyurl.com/2uNLjO



    -

    But what if you don't have a chess board or a partner to play with? What if you want to play chess anytime and anywhere, without any hassle or cost? Well, there is a solution for that: chess online apk.

    -

    Introduction

    -

    What is chess online apk?

    -

    Chess online apk is an application that allows you to play chess online on your Android device. It is not an official app from any chess organization or website, but rather a third-party app that connects you to various chess servers and platforms. You can download it for free from different sources on the internet, but you need to be careful about the quality and security of the apk file.

    -

    Why play chess online on your Android device?

    -

    Playing chess online on your Android device has many advantages over playing on a physical board or a computer. Here are some of them:

    - -

    How to download and install chess online apk

    -

    Step 1: Find a reliable source for the apk file

    -

    The first step to download and install chess online apk is to find a reliable source for the apk file. There are many websites that offer free downloads of chess online apk, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful and do some research before downloading anything from the internet.

    -

    One of the best sources for chess online apk is [Chess.com](https://www.chess.com), which is the #1 free chess app in the world. It has over 50 million users and offers a variety of features and benefits for chess lovers. You can download the apk file from their official website or from other reputable sites like [APKPure](https://apkpure.com/chess-play-learn/com.chess) or [APKMirror](https://www.apkmirror.com/apk/chess-com/chess-play-learn/).
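    One simple way to sanity-check whatever you download, regardless of the site, is to compare the file's SHA-256 hash with the checksum the site publishes (when it does). The snippet below is a minimal sketch of that idea; the file name and expected hash are placeholders.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the hex SHA-256 digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

apk_path = "chess-online.apk"                  # placeholder file name
expected = "<checksum published by the site>"  # placeholder value
actual = sha256_of(apk_path)
print("SHA-256:", actual)
print("Matches published checksum:", actual == expected)
```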

    -

    Step 2: Enable unknown sources on your device settings

    -

    The next step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store or other official sources. To do this, follow these steps:

    -

    chess online apk download free
    -chess online apk download latest version
    -chess online apk download for android
    -chess online apk download 2023
    -chess online apk download mod
    -chess online apk download offline
    -chess online apk download unlimited
    -chess online apk download 3d
    -chess online apk download with friends
    -chess online apk download no ads
    -chess online apk download hack
    -chess online apk download pro
    -chess online apk download premium
    -chess online apk download full
    -chess online apk download cracked
    -chess online apk download update
    -chess online apk download old version
    -chess online apk download new version
    -chess online apk download beta
    -chess online apk download original
    -chess online apk download best
    -chess online apk download top
    -chess online apk download android 11
    -chess online apk download android 10
    -chess online apk download android 9
    -chess online apk download android 8
    -chess online apk download android 7
    -chess online apk download android 6
    -chess online apk download android 5
    -chess online apk download android 4.4.4
    -chess online apk download for pc
    -chess online apk download for laptop
    -chess online apk download for windows 10
    -chess online apk download for windows 7
    -chess online apk download for windows 8.1
    -chess online apk download for macbook pro
    -chess online apk download for macbook air
    -chess online apk download for mac os x
    -chess online apk download for ios
    -chess online apk download for iphone x
    -chess online apk download for iphone 11 pro max
    -chess online apk download for ipad pro
    -chess online apk download for ipad air
    -chess online apk download for ipad mini
    -chess online apk download for fire tablet
    -chess online apk download for kindle fire
    -chess online apk download for chromebook
    -chess online apk download from google play store
    -chess online apk download from apkpure

    -
      -
    1. Go to your device settings, look for the security option, and tap on it.
    2. -
    3. Find the option that says unknown sources or install unknown apps and toggle it on.
    4. -
    5. A warning message will pop up, telling you the risks of installing apps from unknown sources. Read it carefully and tap on OK if you agree.
    6. -
    -

    Step 3: Download and install the apk file

    -

    The final step is to download and install the apk file. To do this, follow these steps:

    -
      -
    1. Go to the website where you found the chess online apk file and tap on the download button.
    2. -
    3. Wait for the download to finish and then open the file manager app on your device.
    4. -
    5. Find the downloaded apk file and tap on it to start the installation process.
    6. -
    7. Follow the instructions on the screen and grant the necessary permissions for the app to run.
    8. -
    9. Once the installation is complete, you can launch the app and start playing chess online.
    10. -
    -

    How to play chess online with the app

    -

    Step 1: Create an account or log in with an existing one

    -

    Before you can play chess online with the app, you need to create an account or log in with an existing one. This will allow you to access all the features and benefits of the app, such as saving your games, joining tournaments, earning ratings, and more. To do this, follow these steps:

    -
      -
    1. Open the app and tap on the menu icon on the top left corner of the screen.
    2. -
    3. Tap on sign up or log in, depending on whether you have an account or not.
    4. -
    5. If you are signing up, enter your email address, username, password, and country. You can also sign up with your Facebook or Google account.
    6. -
    7. If you are logging in, enter your email address or username and password. You can also log in with your Facebook or Google account.
    8. -
    9. Tap on the sign up or log in button and wait for the confirmation message.
    10. -
    -

    Step 2: Choose a game mode and a skill level

    -

    After creating an account or logging in, you can choose a game mode and a skill level that suits your preferences. There are four game modes available on the app: play online, play offline, puzzles, and lessons. You can also choose a skill level from beginner to expert, depending on how confident you are in your chess abilities. To do this, follow these steps:

    -
      -
    1. Tap on the game mode that you want to play. For example, if you want to play online, tap on play online.
    2. -
    3. Select a skill level that matches your level of expertise. For example, if you are a beginner, tap on beginner.
    4. -
    5. You will see a list of available opponents that match your skill level. You can also filter them by rating, time control, color preference, and more.
    6. -
    7. Tap on an opponent that you want to play with and wait for them to accept your challenge.
    8. -
    -

    Step 3: Start playing and enjoy the game

    -

    The last step is to start playing and enjoy the game. You will see a chess board with your pieces and your opponent's pieces on it. You can move your pieces by tapping on them and then tapping on the square where you want to move them. You can also chat with your opponent by tapping on the chat icon on the bottom right corner of the screen. To play chess online with the app, follow these steps:

    -
      -
    1. Make your move by tapping on a piece and then tapping on a square where you want to move it.
    2. -
    3. Wait for your opponent to make their move. You will see their move on the board and hear a sound notification.
    4. -
    5. Continue making moves until one of you wins, loses, or draws the game. You can also offer or accept a draw by tapping on the draw icon on the top right corner of the screen.
    6. -
    7. When the game is over, you will see a summary of the game with your rating changes, statistics, analysis, and more. You can also rematch your opponent by tapping on the rematch button or find a new opponent by tapping on the new game button.
    8. -
    -

    Features and benefits of chess online apk

    -

    Play with millions of players from around the world

    -

    One of the main features and benefits of chess online apk is that you can play with millions of players from around the world, with different skill levels and styles. You can find opponents that match your rating, time control, color preference, and more. You can also join tournaments, clubs, teams, and events that are organized by the app or by other users. You can also challenge your friends or invite them to play with you by using the app's social features.

    -

    Improve your skills with puzzles, lessons, and analysis tools

    -

    Another feature and benefit of chess online apk is that you can improve your skills with puzzles, lessons, and analysis tools that are available on the app. You can solve thousands of puzzles that are tailored to your skill level and help you practice different aspects of the game, such as tactics, strategy, endgames, and more. You can also learn from hundreds of lessons that are taught by expert coaches and cover various topics, such as openings, middlegames, endgames, and more. You can also use the app's analysis tools to review your games, find your mistakes, and learn from them. You can also access a powerful chess engine that can evaluate any position and suggest the best moves.
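    To give a feel for what "evaluating a position" means, here is a rough sketch using the open-source python-chess package. This is not the app's actual engine, just a naive stand-in that plays two opening moves, counts the legal replies, and computes a simple material balance.

```python
import chess  # pip install python-chess

PIECE_VALUES = {chess.PAWN: 1, chess.KNIGHT: 3, chess.BISHOP: 3,
                chess.ROOK: 5, chess.QUEEN: 9, chess.KING: 0}

def material_balance(board: chess.Board) -> int:
    """Positive means White is ahead in material, negative means Black is."""
    score = 0
    for square in chess.SQUARES:
        piece = board.piece_at(square)
        if piece is not None:
            value = PIECE_VALUES[piece.piece_type]
            score += value if piece.color == chess.WHITE else -value
    return score

board = chess.Board()
board.push_san("e4")  # 1. e4
board.push_san("e5")  # 1... e5
print("Legal moves for White:", len(list(board.legal_moves)))
print("Material balance:", material_balance(board))
```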

    -

    Customize your board, pieces, and themes

    -

    A third feature and benefit of chess online apk is that you can customize your board, pieces, and themes according to your preferences. You can choose from a variety of board styles, piece sets, and themes that suit your taste and mood. You can also adjust the sound effects, the animation speed, the board orientation, and more. You can also create your own custom board and pieces by using the app's editor feature.

    -

    Conclusion

    -

    Summary of the main points

    -

    In conclusion, chess online apk is an application that allows you to play chess online on your Android device. It is not an official app from any chess organization or website, but rather a third-party app that connects you to various chess servers and platforms. You can download it for free from different sources on the internet, but you need to be careful about the quality and security of the apk file.

    -

    Chess online apk has many features and benefits that make it a great choice for chess lovers. You can play with millions of players from around the world, improve your skills with puzzles, lessons, and analysis tools, and customize your board, pieces, and themes. You can also save your games, join tournaments, chat with your opponents, and more.

    -

    Call to action

    -

    If you are interested in playing chess online on your Android device, you should give chess online apk a try. It is easy to download and install, and it will provide you with endless hours of fun and entertainment. You will also be able to improve your chess skills and meet new friends along the way. So what are you waiting for? Download chess online apk today and start playing chess online!

    -

    FAQs

    -

    Here are some frequently asked questions about chess online apk:

    -
      -
    1. Is chess online apk safe to use?
    2. -

      Chess online apk is safe to use as long as you download it from a reliable source like [Chess.com], [APKPure], or [APKMirror]. These sources have verified the apk file and ensured that it does not contain any viruses, malware, or spyware that can harm your device or steal your personal information. However, you should always be careful and do some research before downloading anything from the internet, as there may be some fake or malicious sources that try to trick you.

      -
    3. How can I update chess online apk?
    4. -

      Chess online apk is updated regularly by the developers to fix bugs, improve performance, and add new features. You can check for updates by opening the app and tapping on the menu icon on the top left corner of the screen. Then, tap on settings and look for the update option. If there is an update available, you can tap on it and download it. Alternatively, you can also check for updates by visiting the website where you downloaded the apk file and looking for the latest version.

      -
    5. Can I play chess online apk offline?
    6. -

      Chess online apk requires an internet connection to play online with other players or access some of the features and benefits of the app. However, you can also play chess online apk offline by choosing the play offline game mode. This will allow you to play against the app's artificial intelligence, which has different skill levels and personalities. You can also access some of the puzzles and lessons that are available offline.

      -
    7. Can I play chess online apk on other devices?
    8. -

      Chess online apk is designed for Android devices, but you can also play it on other devices that support Android applications. For example, you can play chess online apk on your Windows PC or laptop by using an Android emulator like [BlueStacks](https://www.bluestacks.com/) or [NoxPlayer](https://www.bignox.com/). You can also play chess online apk on your iOS device by using an app like [iAndroid](https://apps.apple.com/us/app/iandroid/id1447357030) or [Appetize.io](https://appetize.io/). However, you may experience some compatibility issues or performance problems when playing chess online apk on other devices.

      -
    9. What are some alternatives to chess online apk?
    10. -

      If you are looking for some alternatives to chess online apk, you may want to try some of these apps:

      -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download NBA 2K20 APK MOD Data for Android (Free Shopping).md b/spaces/1phancelerku/anime-remove-background/Download NBA 2K20 APK MOD Data for Android (Free Shopping).md deleted file mode 100644 index 56528d6251440220f0ae4300f910e9e5f87579a9..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download NBA 2K20 APK MOD Data for Android (Free Shopping).md +++ /dev/null @@ -1,88 +0,0 @@ - -

      Download NBA 2K20 Mod APK Revdl: How to Enjoy the Best Basketball Game on Your Android Device

      -

      If you are a fan of basketball and want to experience the thrill of playing with your favorite NBA stars on your mobile device, then you should definitely check out NBA 2K20. This is one of the most popular and realistic basketball games available for Android devices, with stunning graphics, smooth gameplay, and tons of features. However, if you want to enjoy the game to the fullest, you might want to download NBA 2K20 Mod APK Revdl, which is a modified version of the game that offers free shopping, unlimited VC, and more. In this article, we will tell you what NBA 2K20 is, what are the benefits of NBA 2K20 Mod APK Revdl, and how to download and install it on your device.

      -

      download nba 2k20 mod apk revdl


      Download File 🗸🗸🗸 https://jinyurl.com/2uNLx4



      -

      What is NBA 2K20?

      -

      NBA 2K20 is a basketball simulation game developed by Visual Concepts and published by 2K Sports. It is the 21st installment in the NBA 2K franchise and the successor to NBA 2K19. The game features various modes that allow you to play as your favorite NBA players, create your own custom player, or run your own team. Some of the modes include:

      -

      Features of NBA 2K20

      -

      - 5 new NBA Stories

      -

      This mode lets you follow the careers of five legendary NBA players, such as Kobe Bryant, Tim Duncan, Kevin Garnett, Dirk Nowitzki, and Dwyane Wade. You can relive their memorable moments, challenges, and achievements in this mode.

      -

      - A new MyCAREER storyline

      -

      This mode lets you create your own custom player and guide him through his journey from high school to college to the NBA. You can choose your position, skills, attributes, appearance, and personality. You can also interact with other characters, make decisions that affect your career path, and earn endorsements and fans.

      -

      download nba 2k20 mod apk rexdl
      -download nba 2k20 mod apk unlimited money
      -download nba 2k20 mod apk obb
      -download nba 2k20 mod apk android 1
      -download nba 2k20 mod apk latest version
      -download nba 2k20 mod apk offline
      -download nba 2k20 mod apk free shopping
      -download nba 2k20 mod apk data
      -download nba 2k20 mod apk no verification
      -download nba 2k20 mod apk andropalace
      -download nba 2k20 mod apk highly compressed
      -download nba 2k20 mod apk unlimited vc
      -download nba 2k20 mod apk full unlocked
      -download nba 2k20 mod apk for pc
      -download nba 2k20 mod apk happymod
      -download nba 2k20 mod apk with commentary
      -download nba 2k20 mod apk mega
      -download nba 2k20 mod apk mediafıre
      -download nba 2k20 mod apk all star
      -download nba 2k20 mod apk zippyshare
      -download nba 2k20 mod apk update
      -download nba 2k20 mod apk cheat
      -download nba 2k20 mod apk real faces
      -download nba 2k20 mod apk blacktop mode
      -download nba 2k20 mod apk pure
      -download nba 2k20 mod apk mirror
      -download nba 2k20 mod apk gameplay
      -download nba 2k20 mod apk hack
      -download nba 2k20 mod apk cracked
      -download nba 2k20 mod apk original
      -download nba 2k20 mod apk vip
      -download nba 2k20 mod apk new roster
      -download nba 2k20 mod apk direct link
      -download nba 2k20 mod apk google drive
      -download nba 2k20 mod apk best settings
      -download nba 2k20 mod apk no root
      -download nba 2k20 mod apk unlimited coins
      -download nba 2k20 mod apk file
      -download nba 2k20 mod apk online mode
      -download nba 2k20 mod apk for ios

      -

      - An all-new Run The Streets mode

      -

      This mode lets you take your MyPLAYER to the streets and compete in 3-on-3 tournaments around the world. You can earn rewards, upgrade your skills, and climb the leaderboards in this mode.

      -

      - 5-on-5 basketball with current or all-time great NBA teams

      -

      This mode lets you play as any of the current or historic NBA teams, such as the Los Angeles Lakers, the Chicago Bulls, the Boston Celtics, and more. You can also create your own custom teams and rosters in this mode.

      -

      - Streetball in Blacktop mode

      -

      This mode lets you play streetball with your own rules and settings. You can choose the number of players, the court, the time limit, and the difficulty level. You can also use your MyPLAYER or any NBA player in this mode.

      -

      - Customization options for players, teams, and courts

      -

      This mode lets you customize various aspects of the game, such as the appearance, skills, attributes, animations, accessories, and outfits of your players. You can also edit the logos, jerseys, arenas, and courts of your teams.

      -

      - Online multiplayer and leaderboards

      -

      This mode lets you play online with or against other players from around the world. You can join or create online matches, leagues, tournaments, and events. You can also compete for rankings, rewards, and bragging rights in this mode.

      -

      What is NBA 2K20 Mod APK Revdl?

      -

      NBA 2K20 Mod APK Revdl is a modified version of NBA 2K20 that offers some additional features and benefits that are not available in the original game. NBA 2K20 Mod APK Revdl is created by REXDL, which is a website that provides free download links for various modded games and apps for Android devices. NBA 2K20 Mod APK Revdl is one of the most popular and downloaded modded games on REXDL.

      -

      Benefits of NBA 2K20 Mod APK Revdl

      -

      - Free shopping for clothes, shoes, accessories, and more

      -

      One of the benefits of NBA 2K20 Mod APK Revdl is that it allows you to shop for free in the game. You can buy any clothes, shoes, accessories, and other items that you want for your players without spending any real money or virtual currency. You can also unlock all the premium items that are normally locked behind a paywall.

      -

      - Unlimited virtual currency (VC) to upgrade your skills and attributes

      -

      Another benefit of NBA 2K20 Mod APK Revdl is that it gives you unlimited VC, which is the main currency used in the game. You can use VC to upgrade your skills and attributes, such as shooting, passing, dribbling, defense, rebounding, and more. You can also use VC to buy packs, cards, boosts, and other items in the game.

      -

      - No ads or in-app purchases

      -

      A third benefit of NBA 2K20 Mod APK Revdl is that it removes all the ads and in-app purchases from the game. You can enjoy the game without any interruptions or distractions from annoying ads or pop-ups. You can also avoid spending any real money on in-app purchases that might affect your gameplay or progress.

      -

      - Easy installation and compatibility with most Android devices

      -

      A fourth benefit of NBA 2K20 Mod APK Revdl is that it is easy to install and compatible with most Android devices. You do not need to root your device or use any special tools or methods to install the modded game. You just need to download the APK file and the OBB data file from REXDL and follow some simple steps to install them on your device. You can also run the game smoothly on most Android devices without any lag or glitches.

      As a bonus, we have also prepared a table that compares the features of NBA 2K20 and NBA 2K20 Mod APK Revdl. You can use this table to see the differences and similarities between the two versions of the game.

Table 3: Comparison of NBA 2K20 and NBA 2K20 Mod APK Revdl

| Feature | NBA 2K20 | NBA 2K20 Mod APK Revdl |
| --- | --- | --- |
| Graphics | High-quality graphics with realistic animations and effects | Same as NBA 2K20 |
| Gameplay | Smooth and responsive gameplay with various modes and options | Same as NBA 2K20 |
| Features | 5 new NBA Stories, a new MyCAREER storyline, an all-new Run The Streets mode, 5-on-5 basketball with current or all-time great NBA teams, streetball in Blacktop mode, customization options for players, teams, and courts, online multiplayer and leaderboards | Same as NBA 2K20, plus free shopping for clothes, shoes, accessories, and more, unlimited virtual currency (VC) to upgrade your skills and attributes, no ads or in-app purchases |
| Installation | Requires downloading from the Google Play Store or other trusted sources, may require additional data download after installation, may require payment for some features or items | Requires downloading from REXDL website, requires downloading both APK file and OBB data file, requires enabling installation of apps from unknown sources, requires extracting OBB data file to Android/OBB folder, does not require payment for any features or items |
| Compatibility | Compatible with most Android devices with Android 4.3 or higher, may require high-end devices for optimal performance | Compatible with most Android devices with Android 4.3 or higher, may require high-end devices for optimal performance |

      We hope that this table has been useful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.

      -

      Now that you have learned everything you need to know about NBA 2K20 Mod APK Revdl, it is time to download it and enjoy the best basketball game on your Android device. You will not regret it!

      -

      FAQs

      -

      Here are some frequently asked questions and answers about NBA 2K20 Mod APK Revdl that you might find helpful.

      -

      Q: Is NBA 2K20 Mod APK Revdl safe to download and install?

      -

      A: Yes, NBA 2K20 Mod APK Revdl is safe to download and install. It does not contain any viruses, malware, or spyware that might harm your device or compromise your privacy. However, you should always download it from the official website of REXDL and not from any other sources that might be unreliable or malicious.

      -

      Q: Is NBA 2K20 Mod APK Revdl legal to use?

      -

      A: Yes, NBA 2K20 Mod APK Revdl is legal to use. It does not violate any laws or regulations that might prohibit the use of modded games or apps. However, you should always use it at your own risk and discretion. We are not responsible for any consequences that might arise from using NBA 2K20 Mod APK Revdl.

      -

      Q: Will NBA 2K20 Mod APK Revdl affect my original game progress or data?

      -

      A: No, NBA 2K20 Mod APK Revdl will not affect your original game progress or data. It will create a separate folder and file for the modded game on your device. You can still play the original game without any interference or conflict with the modded game. You can also switch between the two versions of the game as you wish.

      -

      Q: Can I play online with other players using NBA 2K20 Mod APK Revdl?

      -

      A: Yes, you can play online with other players using NBA 2K20 Mod APK Revdl. However, you should be aware that some players might not appreciate playing with someone who has an unfair advantage over them. You might also face some issues or errors while playing online with the modded game. Therefore, we recommend that you play online with other players who are also using NBA 2K20 Mod APK Revdl or play offline with the modded game.

      -

      Q: Can I update NBA 2K20 Mod APK Revdl to the latest version?

      -

      A: Yes, you can update NBA 2K20 Mod APK Revdl to the latest version. However, you should always check the official website of REXDL for the latest update and download link. You should also backup your modded game data before updating to avoid losing any progress or settings. You should also follow the same steps as before to install the updated version of the modded game. You should also disable the automatic update of the game from the Google Play Store or other sources to avoid overwriting the modded game.

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download for Spider Solitaire The Original and Best Solitaire Game.md b/spaces/1phancelerku/anime-remove-background/Download for Spider Solitaire The Original and Best Solitaire Game.md deleted file mode 100644 index c50f3f0dbd05ad17678bf2cf2af3a9b3b64d439a..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download for Spider Solitaire The Original and Best Solitaire Game.md +++ /dev/null @@ -1,120 +0,0 @@ -
      -

      Download for Spider Solitaire: How to Play the Classic Card Game Online and for Free

      -

      Spider Solitaire is one of the most popular and addictive card games in the world. It is a fun and challenging way to test your logic, patience, and concentration skills. But did you know that you can download and play Spider Solitaire online and for free? In this article, we will tell you everything you need to know about Spider Solitaire, including its history, rules, variations, benefits, tips, tricks, and more. We will also show you the best websites to download and play Spider Solitaire online and for free. So, if you are ready to join millions of other Spider Solitaire fans, read on!

      -

      What is Spider Solitaire?

      -

      Spider Solitaire is a type of solitaire game that uses two decks of cards. The goal is to arrange all the cards in the tableau (the playing area) into eight piles of cards, each pile containing cards of the same suit in descending order from King to Ace. Once a pile is completed, it is moved to the foundation (the area at the top) and removed from the game. The game is won when all eight piles are completed.
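That completed-pile rule is easy to express in code. The sketch below is only an illustration of the rule as described here, with a made-up card encoding rather than anything from a real Spider Solitaire implementation.

```python
KING_TO_ACE = list(range(13, 0, -1))  # 13 = King, 12 = Queen, ..., 1 = Ace

def is_completed_pile(cards: list[tuple[int, str]]) -> bool:
    """cards is a list of (rank, suit) pairs, ordered from the King downward."""
    if len(cards) != 13:
        return False
    ranks = [rank for rank, _ in cards]
    suits = {suit for _, suit in cards}
    return ranks == KING_TO_ACE and len(suits) == 1

pile = [(rank, "spades") for rank in range(13, 0, -1)]
print(is_completed_pile(pile))                         # True: King through Ace, all spades
print(is_completed_pile(pile[:-1] + [(1, "hearts")]))  # False: the Ace is the wrong suit
```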

      -

      download for spider solitaire


      Download Filehttps://jinyurl.com/2uNPfd



      -

      The history and popularity of Spider Solitaire

      -

      Spider Solitaire was first introduced in 1949 by F. R. Simon in his book "The Playing Card". It was originally called "Scorpion" and had some different rules. The name "Spider" was given by Thomas Warfield, who created a computer version of the game in 1996. He also added some variations, such as different levels of difficulty based on the number of suits used (one, two, or four). Since then, Spider Solitaire has become one of the most popular solitaire games in the world, especially after it was included in Microsoft Windows in 2001. According to some estimates, more than 100 million people play Spider Solitaire every day.

      -

      The rules and variations of Spider Solitaire

      -

      The basic rules of Spider Solitaire are as follows:

      - -

      There are also some variations of Spider Solitaire that have different rules or features. For example:

      - -

      How to download and play Spider Solitaire online and for free?

      -

      If you want to download and play Spider Solitaire online and for free, you have many options to choose from. You can play Spider Solitaire on your computer, smartphone, tablet, or any other device that has an internet connection and a web browser. You can also download Spider Solitaire apps or software for your device if you prefer. Here are some of the benefits of playing Spider Solitaire online and for free:

      -

      The benefits of playing Spider Solitaire online and for free

      - -

      The best websites to download and play Spider Solitaire online and for free

      -

      There are many websites that offer Spider Solitaire online and for free, but not all of them are reliable, safe, or fun. To help you find the best websites to download and play Spider Solitaire online and for free, we have selected three of them that we think are the best. Here they are:

      -

      Spider Solitaire - Play Online

      -

This website is one of the most popular and trusted websites to play Spider Solitaire online and for free. It has a simple and user-friendly interface, smooth and fast gameplay, and a variety of options and features. You can choose from one, two, or four suits, undo your moves, use hints, change the background color, adjust the sound volume, and more. You can also see your score, time, moves, and statistics. You can play Spider Solitaire on this website on any device that has a web browser.

      -

      download for spider solitaire collection free
      -download for spider solitaire card games
      -download for spider solitaire classic 2022
      -download for spider solitaire HD free
      -download for spider solitaire pro
      -download for spider solitaire windows 10
      -download for spider solitaire windows 8.1
      -download for spider solitaire offline
      -download for spider solitaire no ads
      -download for spider solitaire with hints
      -download for spider solitaire one suit
      -download for spider solitaire two suits
      -download for spider solitaire four suits
      -download for spider solitaire scorpion
      -download for spider solitaire mobilityware
      -download for spider solitaire treecardgames
      -download for spider solitaire microsoft store
      -download for spider solitaire google play
      -download for spider solitaire app store
      -download for spider solitaire apk
      -download for spider solitaire pc
      -download for spider solitaire mac
      -download for spider solitaire android
      -download for spider solitaire ios
      -download for spider solitaire iphone
      -download for spider solitaire ipad
      -download for spider solitaire laptop
      -download for spider solitaire desktop
      -download for spider solitaire online
      -download for spider solitaire free play
      -download for spider solitaire unlimited undo
      -download for spider solitaire daily challenge
      -download for spider solitaire winning deals
      -download for spider solitaire autoplay option
      -download for spider solitaire statistics tracking
      -download for spider solitaire save game progress
      -download for spider solitaire beautiful graphics
      -download for spider solitaire smooth animations
      -download for spider solitaire many card sets
      -download for spider solitaire many backgrounds
      -download for spider solitaire easy to use interface
      -download for spider solitaire fun and addictive gameplay
      -download for spider solitaire best brain exercise
      -download for spider solitaire how to play guide
      -download for spider solitaire tips and tricks
      -download for spider solitaire latest version
      -download for spider solitaire update
      -download for spider solitaire support

      -

      Spider Solitaire: free online card game, play full-screen without download

      -

This website is another great option to play Spider Solitaire online and for free. It has a beautiful and elegant design, responsive and fluid gameplay, and a range of options and features. You can choose from one, two, or four suits, undo your moves, use hints, change the card style, switch to full-screen mode, and more. You can also see your score, time, moves, and statistics. You can play Spider Solitaire on this website on any device that has a web browser.

      -

      Spider Solitaire (2 Suits)

      -

This website is a good choice to play Spider Solitaire online and for free if you are looking for a medium level of difficulty. It offers Spider Solitaire with two suits only, which is more challenging than one suit but easier than four suits. It has a clean and simple interface, smooth and fast gameplay, and some basic options and features. You can undo your moves, use hints, change the background color, and adjust the sound volume. You can also see your score, time, moves, and statistics. You can play Spider Solitaire on this website on any device that has a web browser.

      -

      How to improve your skills and strategies in Spider Solitaire?

      -

      Spider Solitaire is not only a game of luck but also a game of skill and strategy. If you want to improve your skills and strategies in Spider Solitaire, you need to practice regularly, learn from your mistakes, and apply some tips and tricks. Here are some of them:

      -

      The tips and tricks for winning Spider Solitaire

      - -

      The common mistakes and pitfalls to avoid in Spider Solitaire

      - -

      Conclusion

      -

      Spider Solitaire is a classic card game that you can download and play online and for free. It is a fun and challenging way to test your logic, patience, and concentration skills. It also has many benefits, such as improving your memory, mood, and mental health. In this article, we have told you everything you need to know about Spider Solitaire, including its history, rules, variations, benefits, tips, tricks, and more. We have also shown you the best websites to download and play Spider Solitaire online and for free. We hope you have enjoyed reading this article and learned something new. Now it's time to put your knowledge into practice and start playing Spider Solitaire online and for free!

      -

      Call to action

      -

      If you are ready to download and play Spider Solitaire online and for free, click on one of the links below and start having fun!

      -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/22h/vintedois-diffusion-v0-1/app.py b/spaces/22h/vintedois-diffusion-v0-1/app.py deleted file mode 100644 index 258d021a925f201a1b550f342cdb0e54b8f5a051..0000000000000000000000000000000000000000 --- a/spaces/22h/vintedois-diffusion-v0-1/app.py +++ /dev/null @@ -1,135 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, EulerAncestralDiscreteScheduler -import gradio as gr -import torch -from PIL import Image - -model_id = '22h/vintedois-diffusion-v0-1' -prefix = '' - -scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler") - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe_i2i = pipe_i2i.to("cuda") - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def inference(prompt, guidance, steps, width=640, height=640, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False): - - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - prompt = f"{prefix} {prompt}" if auto_prefix else prompt - - try: - if img is not None: - return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - -def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator): - - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe_i2i( - prompt, - negative_prompt = neg_prompt, - image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - generator = generator) - - return result.images[0] - -css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" -
      -
      -

      22h Diffusion v0.1

      -
      -

      - Demo for 22h Diffusion v0-1 Stable Diffusion model.
      - {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""} -

      - Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"}

      - Duplicate Space -
      - """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - image_out = gr.Image(height=512) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=prefix, visible=prefix) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=50, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=640, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=640, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False) - - inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - gr.HTML(""" -
      -
      -

      This space was created using SD Space Creator.

      -
      - """) - -demo.queue(concurrency_count=1) -demo.launch() \ No newline at end of file diff --git a/spaces/ADOPLE/ResumeSummarizer/README.md b/spaces/ADOPLE/ResumeSummarizer/README.md deleted file mode 100644 index 93e79737e1cfd8e1281657df3ca307b9d156e68a..0000000000000000000000000000000000000000 --- a/spaces/ADOPLE/ResumeSummarizer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ResumeSummarizer -emoji: 👁 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -duplicated_from: randstad/ResumeSummarizer ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/wav_processors/__init__.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/wav_processors/__init__.py deleted file mode 100644 index 4be97b377dcb95a0e6bceb876ac0ce93c8290249..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/wav_processors/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from . import base_processor -from . import common_processors diff --git a/spaces/AIZero2Hero4Health/2-BiomedEntityRecognition-GR/README.md b/spaces/AIZero2Hero4Health/2-BiomedEntityRecognition-GR/README.md deleted file mode 100644 index 277ea157607d38b0c354d6c41600f058e43d1ad2..0000000000000000000000000000000000000000 --- a/spaces/AIZero2Hero4Health/2-BiomedEntityRecognition-GR/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 2 BiomedEntityRecognition GR -emoji: ⚡ -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.8.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/settings/+page.server.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/settings/+page.server.ts deleted file mode 100644 index 9084fa8f983c65b2aa8f90c553e1b69006a6d2d1..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/settings/+page.server.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { base } from "$app/paths"; -import { collections } from "$lib/server/database"; -import { redirect } from "@sveltejs/kit"; -import { z } from "zod"; -import { models, validateModel } from "$lib/server/models"; -import { authCondition } from "$lib/server/auth"; -import { DEFAULT_SETTINGS } from "$lib/types/Settings"; - -export const actions = { - default: async function ({ request, locals }) { - const formData = await request.formData(); - - const { ethicsModalAccepted, ...settings } = z - .object({ - shareConversationsWithModelAuthors: z - .union([z.literal("true"), z.literal("on"), z.literal("false"), z.null()]) - .transform((value) => { - return value === "true" || value === "on"; - }), - ethicsModalAccepted: z.boolean({ coerce: true }).optional(), - activeModel: validateModel(models), - customPrompts: z.record(z.string()).default({}), - }) - .parse({ - shareConversationsWithModelAuthors: formData.get("shareConversationsWithModelAuthors"), - ethicsModalAccepted: formData.get("ethicsModalAccepted"), - activeModel: formData.get("activeModel") ?? DEFAULT_SETTINGS.activeModel, - customPrompts: JSON.parse(formData.get("customPrompts")?.toString() ?? 
"{}"), - }); - - throw redirect(303, request.headers.get("referer") || `${base}/`); - }, -}; diff --git a/spaces/Adapter/T2I-Adapter/ldm/modules/encoders/modules.py b/spaces/Adapter/T2I-Adapter/ldm/modules/encoders/modules.py deleted file mode 100644 index d59229ac1c97980e811e3b808f3431311c4f3b7d..0000000000000000000000000000000000000000 --- a/spaces/Adapter/T2I-Adapter/ldm/modules/encoders/modules.py +++ /dev/null @@ -1,441 +0,0 @@ -import torch -import torch.nn as nn -import math -from torch.utils.checkpoint import checkpoint - -from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel, CLIPModel - -import open_clip -import re -from ldm.util import default, count_params - - -class AbstractEncoder(nn.Module): - def __init__(self): - super().__init__() - - def encode(self, *args, **kwargs): - raise NotImplementedError - - -class IdentityEncoder(AbstractEncoder): - - def encode(self, x): - return x - - -class ClassEmbedder(nn.Module): - def __init__(self, embed_dim, n_classes=1000, key='class'): - super().__init__() - self.key = key - self.embedding = nn.Embedding(n_classes, embed_dim) - - def forward(self, batch, key=None): - if key is None: - key = self.key - # this is for use in crossattn - c = batch[key][:, None] - c = self.embedding(c) - return c - - -class FrozenT5Embedder(AbstractEncoder): - """Uses the T5 transformer encoder for text""" - def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl - super().__init__() - self.tokenizer = T5Tokenizer.from_pretrained(version) - self.transformer = T5EncoderModel.from_pretrained(version) - self.device = device - self.max_length = max_length # TODO: typical value? - if freeze: - self.freeze() - - def freeze(self): - self.transformer = self.transformer.eval() - #self.train = disabled_train - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - outputs = self.transformer(input_ids=tokens) - - z = outputs.last_hidden_state - return z - - def encode(self, text): - return self(text) - - -class FrozenCLIPEmbedder(AbstractEncoder): - """Uses the CLIP transformer encoder for text (from huggingface)""" - def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77, - freeze=True, layer="last"): # clip-vit-base-patch32 - super().__init__() - self.tokenizer = CLIPTokenizer.from_pretrained(version) - self.transformer = CLIPModel.from_pretrained(version).text_model - self.device = device - self.max_length = max_length - if freeze: - self.freeze() - self.layer = layer - - def freeze(self): - self.transformer = self.transformer.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer != 'last') - - if self.layer == 'penultimate': - z = outputs.hidden_states[-2] - z = self.transformer.final_layer_norm(z) - else: - z = outputs.last_hidden_state - return z - - def encode(self, text): - 
return self(text) - - -class FrozenOpenCLIPEmbedder(AbstractEncoder): - """ - Uses the OpenCLIP transformer encoder for text - """ - LAYERS = [ - #"pooled", - "last", - "penultimate" - ] - def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77, - freeze=True, layer="last"): - super().__init__() - assert layer in self.LAYERS - model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version) - del model.visual - self.model = model - - self.device = device - self.max_length = max_length - if freeze: - self.freeze() - self.layer = layer - if self.layer == "last": - self.layer_idx = 0 - elif self.layer == "penultimate": - self.layer_idx = 1 - else: - raise NotImplementedError() - - def freeze(self): - self.model = self.model.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - tokens = open_clip.tokenize(text) - z = self.encode_with_transformer(tokens.to(self.device)) - return z - - def encode_with_transformer(self, text): - x = self.model.token_embedding(text) # [batch_size, n_ctx, d_model] - x = x + self.model.positional_embedding - x = x.permute(1, 0, 2) # NLD -> LND - x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask) - x = x.permute(1, 0, 2) # LND -> NLD - x = self.model.ln_final(x) - return x - - def text_transformer_forward(self, x: torch.Tensor, attn_mask = None): - for i, r in enumerate(self.model.transformer.resblocks): - if i == len(self.model.transformer.resblocks) - self.layer_idx: - break - if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting(): - x = checkpoint(r, x, attn_mask) - else: - x = r(x, attn_mask=attn_mask) - return x - - def encode(self, text): - return self(text) - - -class FrozenCLIPT5Encoder(AbstractEncoder): - def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device="cuda", - clip_max_length=77, t5_max_length=77): - super().__init__() - self.clip_encoder = FrozenCLIPEmbedder(clip_version, device, max_length=clip_max_length) - self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length) - print(f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder)*1.e-6:.2f} M parameters, " - f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder)*1.e-6:.2f} M params.") - - def encode(self, text): - return self(text) - - def forward(self, text): - clip_z = self.clip_encoder.encode(text) - t5_z = self.t5_encoder.encode(text) - return [clip_z, t5_z] - - -# code from sd-webui -re_attention = re.compile(r""" -\\\(| -\\\)| -\\\[| -\\]| -\\\\| -\\| -\(| -\[| -:([+-]?[.\d]+)\)| -\)| -]| -[^\\()\[\]:]+| -: -""", re.X) - - -def parse_prompt_attention(text): - """ - Parses a string with attention tokens and returns a list of pairs: text and its associated weight. 
- Accepted tokens are: - (abc) - increases attention to abc by a multiplier of 1.1 - (abc:3.12) - increases attention to abc by a multiplier of 3.12 - [abc] - decreases attention to abc by a multiplier of 1.1 - \( - literal character '(' - \[ - literal character '[' - \) - literal character ')' - \] - literal character ']' - \\ - literal character '\' - anything else - just text - - >>> parse_prompt_attention('normal text') - [['normal text', 1.0]] - >>> parse_prompt_attention('an (important) word') - [['an ', 1.0], ['important', 1.1], [' word', 1.0]] - >>> parse_prompt_attention('(unbalanced') - [['unbalanced', 1.1]] - >>> parse_prompt_attention('\(literal\]') - [['(literal]', 1.0]] - >>> parse_prompt_attention('(unnecessary)(parens)') - [['unnecessaryparens', 1.1]] - >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') - [['a ', 1.0], - ['house', 1.5730000000000004], - [' ', 1.1], - ['on', 1.0], - [' a ', 1.1], - ['hill', 0.55], - [', sun, ', 1.1], - ['sky', 1.4641000000000006], - ['.', 1.1]] - """ - - res = [] - round_brackets = [] - square_brackets = [] - - round_bracket_multiplier = 1.1 - square_bracket_multiplier = 1 / 1.1 - - def multiply_range(start_position, multiplier): - for p in range(start_position, len(res)): - res[p][1] *= multiplier - - for m in re_attention.finditer(text): - text = m.group(0) - weight = m.group(1) - - if text.startswith('\\'): - res.append([text[1:], 1.0]) - elif text == '(': - round_brackets.append(len(res)) - elif text == '[': - square_brackets.append(len(res)) - elif weight is not None and len(round_brackets) > 0: - multiply_range(round_brackets.pop(), float(weight)) - elif text == ')' and len(round_brackets) > 0: - multiply_range(round_brackets.pop(), round_bracket_multiplier) - elif text == ']' and len(square_brackets) > 0: - multiply_range(square_brackets.pop(), square_bracket_multiplier) - else: - res.append([text, 1.0]) - - for pos in round_brackets: - multiply_range(pos, round_bracket_multiplier) - - for pos in square_brackets: - multiply_range(pos, square_bracket_multiplier) - - if len(res) == 0: - res = [["", 1.0]] - - # merge runs of identical weights - i = 0 - while i + 1 < len(res): - if res[i][1] == res[i + 1][1]: - res[i][0] += res[i + 1][0] - res.pop(i + 1) - else: - i += 1 - - return res - -class WebUIFrozenCLIPEmebedder(AbstractEncoder): - def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", freeze=True, layer="penultimate"): - super(WebUIFrozenCLIPEmebedder, self).__init__() - self.tokenizer = CLIPTokenizer.from_pretrained(version) - self.transformer = CLIPModel.from_pretrained(version).text_model - self.device = device - self.layer = layer - if freeze: - self.freeze() - - self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] - self.comma_padding_backtrack = 20 - - def freeze(self): - self.transformer = self.transformer.eval() - for param in self.parameters(): - param.requires_grad = False - - def tokenize(self, texts): - tokenized = self.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"] - return tokenized - - def encode_with_transformers(self, tokens): - outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer!='last') - - if self.layer == 'penultimate': - z = outputs.hidden_states[-2] - z = self.transformer.final_layer_norm(z) - else: - z = outputs.last_hidden_state - - return z - - def tokenize_line(self, line): - parsed = parse_prompt_attention(line) - # print(parsed) - - tokenized = self.tokenize([text for 
text, _ in parsed]) - - remade_tokens = [] - multipliers = [] - last_comma = -1 - - for tokens, (text, weight) in zip(tokenized, parsed): - i = 0 - while i < len(tokens): - token = tokens[i] - - if token == self.comma_token: - last_comma = len(remade_tokens) - elif self.comma_padding_backtrack != 0 and max(len(remade_tokens), - 1) % 75 == 0 and last_comma != -1 and len( - remade_tokens) - last_comma <= self.comma_padding_backtrack: - last_comma += 1 - reloc_tokens = remade_tokens[last_comma:] - reloc_mults = multipliers[last_comma:] - - remade_tokens = remade_tokens[:last_comma] - length = len(remade_tokens) - - rem = int(math.ceil(length / 75)) * 75 - length - remade_tokens += [self.tokenizer.eos_token_id] * rem + reloc_tokens - multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults - - remade_tokens.append(token) - multipliers.append(weight) - i += 1 - - token_count = len(remade_tokens) - prompt_target_length = math.ceil(max(token_count, 1) / 75) * 75 - tokens_to_add = prompt_target_length - len(remade_tokens) - - remade_tokens = remade_tokens + [self.tokenizer.eos_token_id] * tokens_to_add - multipliers = multipliers + [1.0] * tokens_to_add - - return remade_tokens, multipliers, token_count - - def process_text(self, texts): - remade_batch_tokens = [] - token_count = 0 - - cache = {} - batch_multipliers = [] - for line in texts: - if line in cache: - remade_tokens, multipliers = cache[line] - else: - remade_tokens, multipliers, current_token_count = self.tokenize_line(line) - token_count = max(current_token_count, token_count) - - cache[line] = (remade_tokens, multipliers) - - remade_batch_tokens.append(remade_tokens) - batch_multipliers.append(multipliers) - - return batch_multipliers, remade_batch_tokens, token_count - - def process_tokens(self, remade_batch_tokens, batch_multipliers): - remade_batch_tokens = [[self.tokenizer.bos_token_id] + x[:75] + [self.tokenizer.eos_token_id] for x in remade_batch_tokens] - batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers] - - tokens = torch.asarray(remade_batch_tokens).to(self.device) - - z = self.encode_with_transformers(tokens) - - # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise - batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers] - batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(self.device) - original_mean = z.mean() - z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) - new_mean = z.mean() - z *= original_mean / new_mean - - return z - - def forward(self, text): - batch_multipliers, remade_batch_tokens, token_count = self.process_text(text) - - z = None - i = 0 - while max(map(len, remade_batch_tokens)) != 0: - rem_tokens = [x[75:] for x in remade_batch_tokens] - rem_multipliers = [x[75:] for x in batch_multipliers] - - tokens = [] - multipliers = [] - for j in range(len(remade_batch_tokens)): - if len(remade_batch_tokens[j]) > 0: - tokens.append(remade_batch_tokens[j][:75]) - multipliers.append(batch_multipliers[j][:75]) - else: - tokens.append([self.tokenizer.eos_token_id] * 75) - multipliers.append([1.0] * 75) - - z1 = self.process_tokens(tokens, multipliers) - z = z1 if z is None else torch.cat((z, z1), axis=-2) - - remade_batch_tokens = rem_tokens - batch_multipliers = rem_multipliers - i += 1 - - return z - - def encode(self, text): - return self(text) - - - -if __name__ == "__main__": - model = FrozenCLIPEmbedder() - count_params(model, 
verbose=True) diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/moveto-plugin.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/moveto-plugin.d.ts deleted file mode 100644 index bb12e9c1d7cc0ca65c3631c998ecfbf2f390580d..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/moveto-plugin.d.ts +++ /dev/null @@ -1,9 +0,0 @@ -import MoveTo from './moveto'; - -export default class MoveToPlugin extends Phaser.Plugins.BasePlugin { - add( - gameObject: Phaser.GameObjects.GameObject, - config?: MoveTo.IConfig - ): MoveTo; - -} \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Factory.js deleted file mode 100644 index 70cf6df876d6e5f733628ebf2fb62652c1504e9d..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Factory.js +++ /dev/null @@ -1,13 +0,0 @@ -import Custom from './Custom.js'; -import ObjectFactory from '../ObjectFactory.js'; -import SetValue from '../../../plugins/utils/object/SetValue.js'; - -ObjectFactory.register('custom', function (config) { - var gameObject = new Custom(this.scene, config); - this.scene.add.existing(gameObject); - return gameObject; -}); - -SetValue(window, 'RexPlugins.Spinner.Custom', Custom); - -export default Custom; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RunWidthWrap.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RunWidthWrap.js deleted file mode 100644 index 1c489fdb634c903570b49e55f967fb7920406a67..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RunWidthWrap.js +++ /dev/null @@ -1,23 +0,0 @@ -// Default method -var RunWidthWrap = function (parentWidth) { - var child, childWidth; - for (var i in this.sizerChildren) { - child = this.sizerChildren[i]; - if ( - (!child) || - (child.isRexSizer && child.ignoreLayout) || - (!child.runWidthWrap) - ) { - continue; - } - - childWidth = this.getExpandedChildWidth(child, parentWidth); - if (child.isRexSizer) { - childWidth = child.resolveWidth(childWidth); - } - child.runWidthWrap(childWidth); - } - return this; -} - -export default RunWidthWrap; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetChildrenSizers.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetChildrenSizers.js deleted file mode 100644 index 179c8baf0167d1378a80f701ca13048229791775..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetChildrenSizers.js +++ /dev/null @@ -1,15 +0,0 @@ -var GetChildrenSizers = function (out) { - if (out === undefined) { - out = []; - } - var children = this.sizerChildren, - child; - for (var i = 0, cnt = children.length; i < cnt; i++) { - child = children[i]; - if (child && child.isRexSizer) { - out.push(child); - } - } - return out; -} -export default GetChildrenSizers; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateScrollBar.js 
b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateScrollBar.js deleted file mode 100644 index 5805834b863db6072a9a296fe90bb7ea2ecc505d..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateScrollBar.js +++ /dev/null @@ -1,26 +0,0 @@ -import MergeStyle from './utils/MergeStyle.js'; -import ScrollBar from '../../scrollbar/ScrollBar.js'; -import CreateChild from './utils/CreateChild.js'; -import ReplaceSliderConfig from './utils/ReplaceSliderConfig.js'; - -var CreateScrollBar = function (scene, data, view, styles, customBuilders) { - data = MergeStyle(data, styles); - - // Replace data by child game object - CreateChild(scene, data, 'background', view, styles, customBuilders); - ReplaceSliderConfig(scene, data.slider, view, styles, customBuilders); - - var buttonsConfig = data.buttons; - if (buttonsConfig) { - CreateChild(scene, buttonsConfig, 'top', view, styles, customBuilders); - CreateChild(scene, buttonsConfig, 'bottom', view, styles, customBuilders); - CreateChild(scene, buttonsConfig, 'left', view, styles, customBuilders); - CreateChild(scene, buttonsConfig, 'right', view, styles, customBuilders); - } - - var gameObject = new ScrollBar(scene, data); - scene.add.existing(gameObject); - return gameObject; -}; - -export default CreateScrollBar; \ No newline at end of file diff --git a/spaces/Aki004/herta-so-vits/preprocess_hubert_f0.py b/spaces/Aki004/herta-so-vits/preprocess_hubert_f0.py deleted file mode 100644 index 763fb0d65540ed4d62b269914e81c740f3ff6bba..0000000000000000000000000000000000000000 --- a/spaces/Aki004/herta-so-vits/preprocess_hubert_f0.py +++ /dev/null @@ -1,101 +0,0 @@ -import math -import multiprocessing -import os -import argparse -from random import shuffle - -import torch -from glob import glob -from tqdm import tqdm -from modules.mel_processing import spectrogram_torch - -import utils -import logging - -logging.getLogger("numba").setLevel(logging.WARNING) -import librosa -import numpy as np - -hps = utils.get_hparams_from_file("configs/config.json") -sampling_rate = hps.data.sampling_rate -hop_length = hps.data.hop_length - - -def process_one(filename, hmodel): - # print(filename) - wav, sr = librosa.load(filename, sr=sampling_rate) - soft_path = filename + ".soft.pt" - if not os.path.exists(soft_path): - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - wav16k = librosa.resample(wav, orig_sr=sampling_rate, target_sr=16000) - wav16k = torch.from_numpy(wav16k).to(device) - c = utils.get_hubert_content(hmodel, wav_16k_tensor=wav16k) - torch.save(c.cpu(), soft_path) - - f0_path = filename + ".f0.npy" - if not os.path.exists(f0_path): - f0 = utils.compute_f0_dio( - wav, sampling_rate=sampling_rate, hop_length=hop_length - ) - np.save(f0_path, f0) - - spec_path = filename.replace(".wav", ".spec.pt") - if not os.path.exists(spec_path): - # Process spectrogram - # The following code can't be replaced by torch.FloatTensor(wav) - # because load_wav_to_torch return a tensor that need to be normalized - - audio, sr = utils.load_wav_to_torch(filename) - if sr != hps.data.sampling_rate: - raise ValueError( - "{} SR doesn't match target {} SR".format( - sr, hps.data.sampling_rate - ) - ) - - audio_norm = audio / hps.data.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - - spec = spectrogram_torch( - audio_norm, - hps.data.filter_length, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - center=False, - ) 
- spec = torch.squeeze(spec, 0) - torch.save(spec, spec_path) - - -def process_batch(filenames): - print("Loading hubert for content...") - device = "cuda" if torch.cuda.is_available() else "cpu" - hmodel = utils.get_hubert_model().to(device) - print("Loaded hubert.") - for filename in tqdm(filenames): - process_one(filename, hmodel) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--in_dir", type=str, default="dataset/44k", help="path to input dir" - ) - - args = parser.parse_args() - filenames = glob(f"{args.in_dir}/*/*.wav", recursive=True) # [:10] - shuffle(filenames) - multiprocessing.set_start_method("spawn", force=True) - - num_processes = 1 - chunk_size = int(math.ceil(len(filenames) / num_processes)) - chunks = [ - filenames[i : i + chunk_size] for i in range(0, len(filenames), chunk_size) - ] - print([len(c) for c in chunks]) - processes = [ - multiprocessing.Process(target=process_batch, args=(chunk,)) for chunk in chunks - ] - for p in processes: - p.start() diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/optimizer.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/optimizer.py deleted file mode 100644 index cae5ffff3d11aaccd705d6936e080175ab97dd0e..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/optimizer.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Helper wrapper for a Tensorflow optimizer.""" - -import platform -import numpy as np -import tensorflow as tf - -from collections import OrderedDict -from typing import List, Union - -from . import autosummary -from . import tfutil -from .. import util - -from .tfutil import TfExpression, TfExpressionEx - -_collective_ops_warning_printed = False -_collective_ops_group_key = 831766147 -_collective_ops_instance_key = 436340067 - -class Optimizer: - """A Wrapper for tf.train.Optimizer. - - Automatically takes care of: - - Gradient averaging for multi-GPU training. - - Gradient accumulation for arbitrarily large minibatches. - - Dynamic loss scaling and typecasts for FP16 training. - - Ignoring corrupted gradients that contain NaNs/Infs. - - Reporting statistics. - - Well-chosen default settings. - """ - - def __init__(self, - name: str = "Train", # Name string that will appear in TensorFlow graph. - tf_optimizer: str = "tf.train.AdamOptimizer", # Underlying optimizer class. - learning_rate: TfExpressionEx = 0.001, # Learning rate. Can vary over time. - minibatch_multiplier: TfExpressionEx = None, # Treat N consecutive minibatches as one by accumulating gradients. - share: "Optimizer" = None, # Share internal state with a previously created optimizer? - use_loss_scaling: bool = False, # Enable dynamic loss scaling for robust mixed-precision training? - loss_scaling_init: float = 64.0, # Log2 of initial loss scaling factor. - loss_scaling_inc: float = 0.0005, # Log2 of per-minibatch loss scaling increment when there is no overflow. 
- loss_scaling_dec: float = 1.0, # Log2 of per-minibatch loss scaling decrement when there is an overflow. - report_mem_usage: bool = False, # Report fine-grained memory usage statistics in TensorBoard? - **kwargs): - - # Public fields. - self.name = name - self.learning_rate = learning_rate - self.minibatch_multiplier = minibatch_multiplier - self.id = self.name.replace("/", ".") - self.scope = tf.get_default_graph().unique_name(self.id) - self.optimizer_class = util.get_obj_by_name(tf_optimizer) - self.optimizer_kwargs = dict(kwargs) - self.use_loss_scaling = use_loss_scaling - self.loss_scaling_init = loss_scaling_init - self.loss_scaling_inc = loss_scaling_inc - self.loss_scaling_dec = loss_scaling_dec - - # Private fields. - self._updates_applied = False - self._devices = OrderedDict() # device_name => EasyDict() - self._shared_optimizers = OrderedDict() # device_name => optimizer_class - self._gradient_shapes = None # [shape, ...] - self._report_mem_usage = report_mem_usage - - # Validate arguments. - assert callable(self.optimizer_class) - - # Share internal state if requested. - if share is not None: - assert isinstance(share, Optimizer) - assert self.optimizer_class is share.optimizer_class - assert self.learning_rate is share.learning_rate - assert self.optimizer_kwargs == share.optimizer_kwargs - self._shared_optimizers = share._shared_optimizers # pylint: disable=protected-access - - def _get_device(self, device_name: str): - """Get internal state for the given TensorFlow device.""" - tfutil.assert_tf_initialized() - if device_name in self._devices: - return self._devices[device_name] - - # Initialize fields. - device = util.EasyDict() - device.name = device_name - device.optimizer = None # Underlying optimizer: optimizer_class - device.loss_scaling_var = None # Log2 of loss scaling: tf.Variable - device.grad_raw = OrderedDict() # Raw gradients: var => [grad, ...] - device.grad_clean = OrderedDict() # Clean gradients: var => grad - device.grad_acc_vars = OrderedDict() # Accumulation sums: var => tf.Variable - device.grad_acc_count = None # Accumulation counter: tf.Variable - device.grad_acc = OrderedDict() # Accumulated gradients: var => grad - - # Setup TensorFlow objects. - with tfutil.absolute_name_scope(self.scope + "/Devices"), tf.device(device_name), tf.control_dependencies(None): - if device_name not in self._shared_optimizers: - optimizer_name = self.scope.replace("/", "_") + "_opt%d" % len(self._shared_optimizers) - self._shared_optimizers[device_name] = self.optimizer_class(name=optimizer_name, learning_rate=self.learning_rate, **self.optimizer_kwargs) - device.optimizer = self._shared_optimizers[device_name] - if self.use_loss_scaling: - device.loss_scaling_var = tf.Variable(np.float32(self.loss_scaling_init), trainable=False, name="loss_scaling_var") - - # Register device. - self._devices[device_name] = device - return device - - def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None: - """Register the gradients of the given loss function with respect to the given variables. - Intended to be called once per GPU.""" - tfutil.assert_tf_initialized() - assert not self._updates_applied - device = self._get_device(loss.device) - - # Validate trainables. 
- if isinstance(trainable_vars, dict): - trainable_vars = list(trainable_vars.values()) # allow passing in Network.trainables as vars - assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1 - assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss]) - assert all(var.device == device.name for var in trainable_vars) - - # Validate shapes. - if self._gradient_shapes is None: - self._gradient_shapes = [var.shape.as_list() for var in trainable_vars] - assert len(trainable_vars) == len(self._gradient_shapes) - assert all(var.shape.as_list() == var_shape for var, var_shape in zip(trainable_vars, self._gradient_shapes)) - - # Report memory usage if requested. - deps = [loss] - if self._report_mem_usage: - self._report_mem_usage = False - try: - with tf.name_scope(self.id + '_mem'), tf.device(device.name), tf.control_dependencies([loss]): - deps.append(autosummary.autosummary(self.id + "/mem_usage_gb", tf.contrib.memory_stats.BytesInUse() / 2**30)) - except tf.errors.NotFoundError: - pass - - # Compute gradients. - with tf.name_scope(self.id + "_grad"), tf.device(device.name), tf.control_dependencies(deps): - loss = self.apply_loss_scaling(tf.cast(loss, tf.float32)) - gate = tf.train.Optimizer.GATE_NONE # disable gating to reduce memory usage - grad_list = device.optimizer.compute_gradients(loss=loss, var_list=trainable_vars, gate_gradients=gate) - - # Register gradients. - for grad, var in grad_list: - if var not in device.grad_raw: - device.grad_raw[var] = [] - device.grad_raw[var].append(grad) - - def apply_updates(self, allow_no_op: bool = False) -> tf.Operation: - """Construct training op to update the registered variables based on their gradients.""" - tfutil.assert_tf_initialized() - assert not self._updates_applied - self._updates_applied = True - all_ops = [] - - # Check for no-op. - if allow_no_op and len(self._devices) == 0: - with tfutil.absolute_name_scope(self.scope): - return tf.no_op(name='TrainingOp') - - # Clean up gradients. - for device_idx, device in enumerate(self._devices.values()): - with tfutil.absolute_name_scope(self.scope + "/Clean%d" % device_idx), tf.device(device.name): - for var, grad in device.grad_raw.items(): - - # Filter out disconnected gradients and convert to float32. - grad = [g for g in grad if g is not None] - grad = [tf.cast(g, tf.float32) for g in grad] - - # Sum within the device. - if len(grad) == 0: - grad = tf.zeros(var.shape) # No gradients => zero. - elif len(grad) == 1: - grad = grad[0] # Single gradient => use as is. - else: - grad = tf.add_n(grad) # Multiple gradients => sum. - - # Scale as needed. - scale = 1.0 / len(device.grad_raw[var]) / len(self._devices) - scale = tf.constant(scale, dtype=tf.float32, name="scale") - if self.minibatch_multiplier is not None: - scale /= tf.cast(self.minibatch_multiplier, tf.float32) - scale = self.undo_loss_scaling(scale) - device.grad_clean[var] = grad * scale - - # Sum gradients across devices. - if len(self._devices) > 1: - with tfutil.absolute_name_scope(self.scope + "/Broadcast"), tf.device(None): - if platform.system() == "Windows": # Windows => NCCL ops are not available. - self._broadcast_fallback() - elif tf.VERSION.startswith("1.15."): # TF 1.15 => NCCL ops are broken: https://github.com/tensorflow/tensorflow/issues/41539 - self._broadcast_fallback() - else: # Otherwise => NCCL ops are safe to use. - self._broadcast_nccl() - - # Apply updates separately on each device. 
- for device_idx, device in enumerate(self._devices.values()): - with tfutil.absolute_name_scope(self.scope + "/Apply%d" % device_idx), tf.device(device.name): - # pylint: disable=cell-var-from-loop - - # Accumulate gradients over time. - if self.minibatch_multiplier is None: - acc_ok = tf.constant(True, name='acc_ok') - device.grad_acc = OrderedDict(device.grad_clean) - else: - # Create variables. - with tf.control_dependencies(None): - for var in device.grad_clean.keys(): - device.grad_acc_vars[var] = tf.Variable(tf.zeros(var.shape), trainable=False, name="grad_acc_var") - device.grad_acc_count = tf.Variable(tf.zeros([]), trainable=False, name="grad_acc_count") - - # Track counter. - count_cur = device.grad_acc_count + 1.0 - count_inc_op = lambda: tf.assign(device.grad_acc_count, count_cur) - count_reset_op = lambda: tf.assign(device.grad_acc_count, tf.zeros([])) - acc_ok = (count_cur >= tf.cast(self.minibatch_multiplier, tf.float32)) - all_ops.append(tf.cond(acc_ok, count_reset_op, count_inc_op)) - - # Track gradients. - for var, grad in device.grad_clean.items(): - acc_var = device.grad_acc_vars[var] - acc_cur = acc_var + grad - device.grad_acc[var] = acc_cur - with tf.control_dependencies([acc_cur]): - acc_inc_op = lambda: tf.assign(acc_var, acc_cur) - acc_reset_op = lambda: tf.assign(acc_var, tf.zeros(var.shape)) - all_ops.append(tf.cond(acc_ok, acc_reset_op, acc_inc_op)) - - # No overflow => apply gradients. - all_ok = tf.reduce_all(tf.stack([acc_ok] + [tf.reduce_all(tf.is_finite(g)) for g in device.grad_acc.values()])) - apply_op = lambda: device.optimizer.apply_gradients([(tf.cast(grad, var.dtype), var) for var, grad in device.grad_acc.items()]) - all_ops.append(tf.cond(all_ok, apply_op, tf.no_op)) - - # Adjust loss scaling. - if self.use_loss_scaling: - ls_inc_op = lambda: tf.assign_add(device.loss_scaling_var, self.loss_scaling_inc) - ls_dec_op = lambda: tf.assign_sub(device.loss_scaling_var, self.loss_scaling_dec) - ls_update_op = lambda: tf.group(tf.cond(all_ok, ls_inc_op, ls_dec_op)) - all_ops.append(tf.cond(acc_ok, ls_update_op, tf.no_op)) - - # Last device => report statistics. - if device_idx == len(self._devices) - 1: - all_ops.append(autosummary.autosummary(self.id + "/learning_rate", tf.convert_to_tensor(self.learning_rate))) - all_ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(all_ok, 0, 1), condition=acc_ok)) - if self.use_loss_scaling: - all_ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", device.loss_scaling_var)) - - # Initialize variables. - self.reset_optimizer_state() - if self.use_loss_scaling: - tfutil.init_uninitialized_vars([device.loss_scaling_var for device in self._devices.values()]) - if self.minibatch_multiplier is not None: - tfutil.run([var.initializer for device in self._devices.values() for var in list(device.grad_acc_vars.values()) + [device.grad_acc_count]]) - - # Group everything into a single op. 
- with tfutil.absolute_name_scope(self.scope): - return tf.group(*all_ops, name="TrainingOp") - - def reset_optimizer_state(self) -> None: - """Reset internal state of the underlying optimizer.""" - tfutil.assert_tf_initialized() - tfutil.run([var.initializer for device in self._devices.values() for var in device.optimizer.variables()]) - - def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]: - """Get or create variable representing log2 of the current dynamic loss scaling factor.""" - return self._get_device(device).loss_scaling_var - - def apply_loss_scaling(self, value: TfExpression) -> TfExpression: - """Apply dynamic loss scaling for the given expression.""" - assert tfutil.is_tf_expression(value) - if not self.use_loss_scaling: - return value - return value * tfutil.exp2(self.get_loss_scaling_var(value.device)) - - def undo_loss_scaling(self, value: TfExpression) -> TfExpression: - """Undo the effect of dynamic loss scaling for the given expression.""" - assert tfutil.is_tf_expression(value) - if not self.use_loss_scaling: - return value - return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type - - def _broadcast_nccl(self): - """Sum gradients across devices using NCCL ops (fast path).""" - from tensorflow.python.ops import nccl_ops # pylint: disable=no-name-in-module - for all_vars in zip(*[device.grad_clean.keys() for device in self._devices.values()]): - if any(x.shape.num_elements() > 0 for x in all_vars): - all_grads = [device.grad_clean[var] for device, var in zip(self._devices.values(), all_vars)] - all_grads = nccl_ops.all_sum(all_grads) - for device, var, grad in zip(self._devices.values(), all_vars, all_grads): - device.grad_clean[var] = grad - - def _broadcast_fallback(self): - """Sum gradients across devices using TensorFlow collective ops (slow fallback path).""" - from tensorflow.python.ops import collective_ops # pylint: disable=no-name-in-module - global _collective_ops_warning_printed, _collective_ops_group_key, _collective_ops_instance_key - if all(x.shape.num_elements() == 0 for device in self._devices.values() for x in device.grad_clean.values()): - return - if not _collective_ops_warning_printed: - print("------------------------------------------------------------------------") - print("WARNING: Using slow fallback implementation for inter-GPU communication.") - print("Please use TensorFlow 1.14 on Linux for optimal training performance.") - print("------------------------------------------------------------------------") - _collective_ops_warning_printed = True - for device in self._devices.values(): - with tf.device(device.name): - combo = [tf.reshape(x, [x.shape.num_elements()]) for x in device.grad_clean.values()] - combo = tf.concat(combo, axis=0) - combo = collective_ops.all_reduce(combo, merge_op='Add', final_op='Id', - group_size=len(self._devices), group_key=_collective_ops_group_key, - instance_key=_collective_ops_instance_key) - cur_ofs = 0 - for var, grad_old in device.grad_clean.items(): - grad_new = tf.reshape(combo[cur_ofs : cur_ofs + grad_old.shape.num_elements()], grad_old.shape) - cur_ofs += grad_old.shape.num_elements() - device.grad_clean[var] = grad_new - _collective_ops_instance_key += 1 - - -class SimpleAdam: - """Simplified version of tf.train.AdamOptimizer that behaves identically when used with dnnlib.tflib.Optimizer.""" - - def __init__(self, name="Adam", learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): - self.name = name - self.learning_rate = 
learning_rate - self.beta1 = beta1 - self.beta2 = beta2 - self.epsilon = epsilon - self.all_state_vars = [] - - def variables(self): - return self.all_state_vars - - def compute_gradients(self, loss, var_list, gate_gradients=tf.train.Optimizer.GATE_NONE): - assert gate_gradients == tf.train.Optimizer.GATE_NONE - return list(zip(tf.gradients(loss, var_list), var_list)) - - def apply_gradients(self, grads_and_vars): - with tf.name_scope(self.name): - state_vars = [] - update_ops = [] - - # Adjust learning rate to deal with startup bias. - with tf.control_dependencies(None): - b1pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False) - b2pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False) - state_vars += [b1pow_var, b2pow_var] - b1pow_new = b1pow_var * self.beta1 - b2pow_new = b2pow_var * self.beta2 - update_ops += [tf.assign(b1pow_var, b1pow_new), tf.assign(b2pow_var, b2pow_new)] - lr_new = self.learning_rate * tf.sqrt(1 - b2pow_new) / (1 - b1pow_new) - - # Construct ops to update each variable. - for grad, var in grads_and_vars: - with tf.control_dependencies(None): - m_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False) - v_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False) - state_vars += [m_var, v_var] - m_new = self.beta1 * m_var + (1 - self.beta1) * grad - v_new = self.beta2 * v_var + (1 - self.beta2) * tf.square(grad) - var_delta = lr_new * m_new / (tf.sqrt(v_new) + self.epsilon) - update_ops += [tf.assign(m_var, m_new), tf.assign(v_var, v_new), tf.assign_sub(var, var_delta)] - - # Group everything together. - self.all_state_vars += state_vars - return tf.group(*update_ops) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cpu/Dockerfile b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cpu/Dockerfile deleted file mode 100644 index 127c61a719c5f43cf10561e1e64123799ce62402..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cpu/Dockerfile +++ /dev/null @@ -1,45 +0,0 @@ -FROM ubuntu:20.04 -LABEL maintainer="Hugging Face" -LABEL repository="diffusers" - -ENV DEBIAN_FRONTEND=noninteractive - -RUN apt update && \ - apt install -y bash \ - build-essential \ - git \ - git-lfs \ - curl \ - ca-certificates \ - libsndfile1-dev \ - python3.8 \ - python3-pip \ - libgl1 \ - python3.8-venv && \ - rm -rf /var/lib/apt/lists - -# make sure to use venv -RUN python3 -m venv /opt/venv -ENV PATH="/opt/venv/bin:$PATH" - -# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) -RUN python3 -m pip install --no-cache-dir --upgrade pip && \ - python3 -m pip install --no-cache-dir \ - torch \ - torchvision \ - torchaudio \ - invisible_watermark \ - --extra-index-url https://download.pytorch.org/whl/cpu && \ - python3 -m pip install --no-cache-dir \ - accelerate \ - datasets \ - hf-doc-builder \ - huggingface-hub \ - Jinja2 \ - librosa \ - numpy \ - scipy \ - tensorboard \ - transformers - -CMD ["/bin/bash"] diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py deleted file mode 100644 index 92d42bf0c75eb060548176f3bb8d003bc7bf7afd..0000000000000000000000000000000000000000 --- 
a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py +++ /dev/null @@ -1,489 +0,0 @@ -# Copyright 2023 Kakao Brain and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import List, Optional, Tuple, Union - -import torch -from torch.nn import functional as F -from transformers import CLIPTextModelWithProjection, CLIPTokenizer -from transformers.models.clip.modeling_clip import CLIPTextModelOutput - -from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel -from ...schedulers import UnCLIPScheduler -from ...utils import logging, randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput -from .text_proj import UnCLIPTextProjModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -class UnCLIPPipeline(DiffusionPipeline): - """ - Pipeline for text-to-image generation using unCLIP. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). - - Args: - text_encoder ([`~transformers.CLIPTextModelWithProjection`]): - Frozen text-encoder. - tokenizer ([`~transformers.CLIPTokenizer`]): - A `CLIPTokenizer` to tokenize text. - prior ([`PriorTransformer`]): - The canonical unCLIP prior to approximate the image embedding from the text embedding. - text_proj ([`UnCLIPTextProjModel`]): - Utility class to prepare and combine the embeddings before they are passed to the decoder. - decoder ([`UNet2DConditionModel`]): - The decoder to invert the image embedding into an image. - super_res_first ([`UNet2DModel`]): - Super resolution UNet. Used in all but the last step of the super resolution diffusion process. - super_res_last ([`UNet2DModel`]): - Super resolution UNet. Used in the last step of the super resolution diffusion process. - prior_scheduler ([`UnCLIPScheduler`]): - Scheduler used in the prior denoising process (a modified [`DDPMScheduler`]). - decoder_scheduler ([`UnCLIPScheduler`]): - Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]). - super_res_scheduler ([`UnCLIPScheduler`]): - Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]). 
- - """ - - _exclude_from_cpu_offload = ["prior"] - - prior: PriorTransformer - decoder: UNet2DConditionModel - text_proj: UnCLIPTextProjModel - text_encoder: CLIPTextModelWithProjection - tokenizer: CLIPTokenizer - super_res_first: UNet2DModel - super_res_last: UNet2DModel - - prior_scheduler: UnCLIPScheduler - decoder_scheduler: UnCLIPScheduler - super_res_scheduler: UnCLIPScheduler - - def __init__( - self, - prior: PriorTransformer, - decoder: UNet2DConditionModel, - text_encoder: CLIPTextModelWithProjection, - tokenizer: CLIPTokenizer, - text_proj: UnCLIPTextProjModel, - super_res_first: UNet2DModel, - super_res_last: UNet2DModel, - prior_scheduler: UnCLIPScheduler, - decoder_scheduler: UnCLIPScheduler, - super_res_scheduler: UnCLIPScheduler, - ): - super().__init__() - - self.register_modules( - prior=prior, - decoder=decoder, - text_encoder=text_encoder, - tokenizer=tokenizer, - text_proj=text_proj, - super_res_first=super_res_first, - super_res_last=super_res_last, - prior_scheduler=prior_scheduler, - decoder_scheduler=decoder_scheduler, - super_res_scheduler=super_res_scheduler, - ) - - def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - if latents.shape != shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") - latents = latents.to(device) - - latents = latents * scheduler.init_noise_sigma - return latents - - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, - text_attention_mask: Optional[torch.Tensor] = None, - ): - if text_model_output is None: - batch_size = len(prompt) if isinstance(prompt, list) else 1 - # get prompt text embeddings - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - text_mask = text_inputs.attention_mask.bool().to(device) - - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] - - text_encoder_output = self.text_encoder(text_input_ids.to(device)) - - prompt_embeds = text_encoder_output.text_embeds - text_encoder_hidden_states = text_encoder_output.last_hidden_state - - else: - batch_size = text_model_output[0].shape[0] - prompt_embeds, text_encoder_hidden_states = text_model_output[0], text_model_output[1] - text_mask = text_attention_mask - - prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) - text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) - text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) - - if do_classifier_free_guidance: - uncond_tokens = [""] * batch_size - - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=self.tokenizer.model_max_length, - 
truncation=True, - return_tensors="pt", - ) - uncond_text_mask = uncond_input.attention_mask.bool().to(device) - negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) - - negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds - uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state - - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - - seq_len = negative_prompt_embeds.shape[1] - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) - - seq_len = uncond_text_encoder_hidden_states.shape[1] - uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) - uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( - batch_size * num_images_per_prompt, seq_len, -1 - ) - uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) - - # done duplicates - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) - - text_mask = torch.cat([uncond_text_mask, text_mask]) - - return prompt_embeds, text_encoder_hidden_states, text_mask - - @torch.no_grad() - def __call__( - self, - prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: int = 1, - prior_num_inference_steps: int = 25, - decoder_num_inference_steps: int = 25, - super_res_num_inference_steps: int = 7, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - prior_latents: Optional[torch.FloatTensor] = None, - decoder_latents: Optional[torch.FloatTensor] = None, - super_res_latents: Optional[torch.FloatTensor] = None, - text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, - text_attention_mask: Optional[torch.Tensor] = None, - prior_guidance_scale: float = 4.0, - decoder_guidance_scale: float = 8.0, - output_type: Optional[str] = "pil", - return_dict: bool = True, - ): - """ - The call function to the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide image generation. This can only be left undefined if `text_model_output` - and `text_attention_mask` is passed. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - prior_num_inference_steps (`int`, *optional*, defaults to 25): - The number of denoising steps for the prior. More denoising steps usually lead to a higher quality - image at the expense of slower inference. - decoder_num_inference_steps (`int`, *optional*, defaults to 25): - The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality - image at the expense of slower inference. - super_res_num_inference_steps (`int`, *optional*, defaults to 7): - The number of denoising steps for super resolution. More denoising steps usually lead to a higher - quality image at the expense of slower inference. 
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - prior_latents (`torch.FloatTensor` of shape (batch size, embeddings dimension), *optional*): - Pre-generated noisy latents to be used as inputs for the prior. - decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*): - Pre-generated noisy latents to be used as inputs for the decoder. - super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*): - Pre-generated noisy latents to be used as inputs for the decoder. - prior_guidance_scale (`float`, *optional*, defaults to 4.0): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - decoder_guidance_scale (`float`, *optional*, defaults to 4.0): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - text_model_output (`CLIPTextModelOutput`, *optional*): - Pre-defined [`CLIPTextModel`] outputs that can be derived from the text encoder. Pre-defined text - outputs can be passed for tasks like text embedding interpolations. Make sure to also pass - `text_attention_mask` in this case. `prompt` can the be left `None`. - text_attention_mask (`torch.Tensor`, *optional*): - Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention - masks are necessary when passing `text_model_output`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipelines.ImagePipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is - returned where the first element is a list with the generated images. 
- """ - if prompt is not None: - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - else: - batch_size = text_model_output[0].shape[0] - - device = self._execution_device - - batch_size = batch_size * num_images_per_prompt - - do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 - - prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( - prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask - ) - - # prior - - self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) - prior_timesteps_tensor = self.prior_scheduler.timesteps - - embedding_dim = self.prior.config.embedding_dim - - prior_latents = self.prepare_latents( - (batch_size, embedding_dim), - prompt_embeds.dtype, - device, - generator, - prior_latents, - self.prior_scheduler, - ) - - for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents - - predicted_image_embedding = self.prior( - latent_model_input, - timestep=t, - proj_embedding=prompt_embeds, - encoder_hidden_states=text_encoder_hidden_states, - attention_mask=text_mask, - ).predicted_image_embedding - - if do_classifier_free_guidance: - predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) - predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( - predicted_image_embedding_text - predicted_image_embedding_uncond - ) - - if i + 1 == prior_timesteps_tensor.shape[0]: - prev_timestep = None - else: - prev_timestep = prior_timesteps_tensor[i + 1] - - prior_latents = self.prior_scheduler.step( - predicted_image_embedding, - timestep=t, - sample=prior_latents, - generator=generator, - prev_timestep=prev_timestep, - ).prev_sample - - prior_latents = self.prior.post_process_latents(prior_latents) - - image_embeddings = prior_latents - - # done prior - - # decoder - - text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj( - image_embeddings=image_embeddings, - prompt_embeds=prompt_embeds, - text_encoder_hidden_states=text_encoder_hidden_states, - do_classifier_free_guidance=do_classifier_free_guidance, - ) - - if device.type == "mps": - # HACK: MPS: There is a panic when padding bool tensors, - # so cast to int tensor for the pad and back to bool afterwards - text_mask = text_mask.type(torch.int) - decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) - decoder_text_mask = decoder_text_mask.type(torch.bool) - else: - decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) - - self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) - decoder_timesteps_tensor = self.decoder_scheduler.timesteps - - num_channels_latents = self.decoder.config.in_channels - height = self.decoder.config.sample_size - width = self.decoder.config.sample_size - - decoder_latents = self.prepare_latents( - (batch_size, num_channels_latents, height, width), - text_encoder_hidden_states.dtype, - device, - generator, - decoder_latents, - self.decoder_scheduler, - ) - - for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): - # expand the latents if we 
are doing classifier free guidance - latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents - - noise_pred = self.decoder( - sample=latent_model_input, - timestep=t, - encoder_hidden_states=text_encoder_hidden_states, - class_labels=additive_clip_time_embeddings, - attention_mask=decoder_text_mask, - ).sample - - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) - noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) - noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) - noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) - - if i + 1 == decoder_timesteps_tensor.shape[0]: - prev_timestep = None - else: - prev_timestep = decoder_timesteps_tensor[i + 1] - - # compute the previous noisy sample x_t -> x_t-1 - decoder_latents = self.decoder_scheduler.step( - noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator - ).prev_sample - - decoder_latents = decoder_latents.clamp(-1, 1) - - image_small = decoder_latents - - # done decoder - - # super res - - self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) - super_res_timesteps_tensor = self.super_res_scheduler.timesteps - - channels = self.super_res_first.config.in_channels // 2 - height = self.super_res_first.config.sample_size - width = self.super_res_first.config.sample_size - - super_res_latents = self.prepare_latents( - (batch_size, channels, height, width), - image_small.dtype, - device, - generator, - super_res_latents, - self.super_res_scheduler, - ) - - if device.type == "mps": - # MPS does not support many interpolations - image_upscaled = F.interpolate(image_small, size=[height, width]) - else: - interpolate_antialias = {} - if "antialias" in inspect.signature(F.interpolate).parameters: - interpolate_antialias["antialias"] = True - - image_upscaled = F.interpolate( - image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias - ) - - for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): - # no classifier free guidance - - if i == super_res_timesteps_tensor.shape[0] - 1: - unet = self.super_res_last - else: - unet = self.super_res_first - - latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) - - noise_pred = unet( - sample=latent_model_input, - timestep=t, - ).sample - - if i + 1 == super_res_timesteps_tensor.shape[0]: - prev_timestep = None - else: - prev_timestep = super_res_timesteps_tensor[i + 1] - - # compute the previous noisy sample x_t -> x_t-1 - super_res_latents = self.super_res_scheduler.step( - noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator - ).prev_sample - - image = super_res_latents - # done super res - - # post processing - - image = image * 0.5 + 0.5 - image = image.clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index 
d2edab113649c38cac3c7dc3ff425462f7c40ffd..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index 93b7d51912abaaab55ceac5263737d02cd4e99fa..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,61 +0,0 @@ -_base_ = './mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://detectron2/resnext101_32x8d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=8, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - style='pytorch')) - -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], - std=[57.375, 57.120, 58.395], - to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r50_8x8_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r50_8x8_coco.py deleted file mode 100644 index b3adcb74a6155a0ab7303ab9ae90ee120f3eb4ad..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r50_8x8_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = 'yolact_r50_1x8_coco.py' - -optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[20, 42, 49, 52]) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/ext_loader.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/ext_loader.py deleted file mode 100644 index 08132d2c1b9a1c28880e4bab4d4fa1ba39d9d083..0000000000000000000000000000000000000000 --- 
a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/ext_loader.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import importlib -import os -import pkgutil -import warnings -from collections import namedtuple - -import torch - -if torch.__version__ != 'parrots': - - def load_ext(name, funcs): - ext = importlib.import_module('mmcv.' + name) - for fun in funcs: - assert hasattr(ext, fun), f'{fun} miss in module {name}' - return ext -else: - from parrots import extension - from parrots.base import ParrotsException - - has_return_value_ops = [ - 'nms', - 'softnms', - 'nms_match', - 'nms_rotated', - 'top_pool_forward', - 'top_pool_backward', - 'bottom_pool_forward', - 'bottom_pool_backward', - 'left_pool_forward', - 'left_pool_backward', - 'right_pool_forward', - 'right_pool_backward', - 'fused_bias_leakyrelu', - 'upfirdn2d', - 'ms_deform_attn_forward', - 'pixel_group', - 'contour_expand', - ] - - def get_fake_func(name, e): - - def fake_func(*args, **kwargs): - warnings.warn(f'{name} is not supported in parrots now') - raise e - - return fake_func - - def load_ext(name, funcs): - ExtModule = namedtuple('ExtModule', funcs) - ext_list = [] - lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - for fun in funcs: - try: - ext_fun = extension.load(fun, name, lib_dir=lib_root) - except ParrotsException as e: - if 'No element registered' not in e.message: - warnings.warn(e.message) - ext_fun = get_fake_func(fun, e) - ext_list.append(ext_fun) - else: - if fun in has_return_value_ops: - ext_list.append(ext_fun.op) - else: - ext_list.append(ext_fun.op_) - return ExtModule(*ext_list) - - -def check_ops_exist(): - ext_loader = pkgutil.find_loader('mmcv._ext') - return ext_loader is not None diff --git a/spaces/Artrajz/vits-simple-api/utils/data_utils.py b/spaces/Artrajz/vits-simple-api/utils/data_utils.py deleted file mode 100644 index f1abf47484f4705d00f7e9a719e0f2577cffda22..0000000000000000000000000000000000000000 --- a/spaces/Artrajz/vits-simple-api/utils/data_utils.py +++ /dev/null @@ -1,97 +0,0 @@ -import os -import logging - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() - - -def load_checkpoint(checkpoint_path, model): - from torch import load - checkpoint_dict = load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict.get('iteration', None) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logging.info(f"{k} is not in the checkpoint") - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - if iteration: - logging.info(f"Loaded checkpoint '{checkpoint_path}' (iteration {iteration})") - else: - logging.info(f"Loaded checkpoint '{checkpoint_path}'") 
- return - - -def get_hparams_from_file(config_path): - from json import loads - with open(config_path, 'r', encoding='utf-8') as f: - data = f.read() - config = loads(data) - - hparams = HParams(**config) - return hparams - - -def load_audio_to_torch(full_path, target_sampling_rate): - import librosa - from torch import FloatTensor - from numpy import float32 - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return FloatTensor(audio.astype(float32)) - - -def clean_folder(folder_path): - for filename in os.listdir(folder_path): - file_path = os.path.join(folder_path, filename) - # 如果是文件,则删除文件。如果是文件夹则跳过。 - if os.path.isfile(file_path): - os.remove(file_path) - - -def check_is_none(item) -> bool: - # none -> True, not none -> False - return item is None or (isinstance(item, str) and str(item).isspace()) or str(item) == "" - - -def save_audio(audio, path): - with open(path, "wb") as f: - f.write(audio) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/align.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/align.py deleted file mode 100644 index c310b66e783820e5596bee9e4d92e531d59d6dc9..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/align.py +++ /dev/null @@ -1,311 +0,0 @@ -import sys -from itertools import chain -from typing import TYPE_CHECKING, Iterable, Optional - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from pip._vendor.typing_extensions import Literal # pragma: no cover - -from .constrain import Constrain -from .jupyter import JupyterMixin -from .measure import Measurement -from .segment import Segment -from .style import StyleType - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderableType, RenderResult - -AlignMethod = Literal["left", "center", "right"] -VerticalAlignMethod = Literal["top", "middle", "bottom"] - - -class Align(JupyterMixin): - """Align a renderable by adding spaces if necessary. - - Args: - renderable (RenderableType): A console renderable. - align (AlignMethod): One of "left", "center", or "right"" - style (StyleType, optional): An optional style to apply to the background. - vertical (Optional[VerticalAlginMethod], optional): Optional vertical align, one of "top", "middle", or "bottom". Defaults to None. - pad (bool, optional): Pad the right with spaces. Defaults to True. - width (int, optional): Restrict contents to given width, or None to use default width. Defaults to None. - height (int, optional): Set height of align renderable, or None to fit to contents. Defaults to None. - - Raises: - ValueError: if ``align`` is not one of the expected values. 
- """ - - def __init__( - self, - renderable: "RenderableType", - align: AlignMethod = "left", - style: Optional[StyleType] = None, - *, - vertical: Optional[VerticalAlignMethod] = None, - pad: bool = True, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> None: - if align not in ("left", "center", "right"): - raise ValueError( - f'invalid value for align, expected "left", "center", or "right" (not {align!r})' - ) - if vertical is not None and vertical not in ("top", "middle", "bottom"): - raise ValueError( - f'invalid value for vertical, expected "top", "middle", or "bottom" (not {vertical!r})' - ) - self.renderable = renderable - self.align = align - self.style = style - self.vertical = vertical - self.pad = pad - self.width = width - self.height = height - - def __repr__(self) -> str: - return f"Align({self.renderable!r}, {self.align!r})" - - @classmethod - def left( - cls, - renderable: "RenderableType", - style: Optional[StyleType] = None, - *, - vertical: Optional[VerticalAlignMethod] = None, - pad: bool = True, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> "Align": - """Align a renderable to the left.""" - return cls( - renderable, - "left", - style=style, - vertical=vertical, - pad=pad, - width=width, - height=height, - ) - - @classmethod - def center( - cls, - renderable: "RenderableType", - style: Optional[StyleType] = None, - *, - vertical: Optional[VerticalAlignMethod] = None, - pad: bool = True, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> "Align": - """Align a renderable to the center.""" - return cls( - renderable, - "center", - style=style, - vertical=vertical, - pad=pad, - width=width, - height=height, - ) - - @classmethod - def right( - cls, - renderable: "RenderableType", - style: Optional[StyleType] = None, - *, - vertical: Optional[VerticalAlignMethod] = None, - pad: bool = True, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> "Align": - """Align a renderable to the right.""" - return cls( - renderable, - "right", - style=style, - vertical=vertical, - pad=pad, - width=width, - height=height, - ) - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - align = self.align - width = console.measure(self.renderable, options=options).maximum - rendered = console.render( - Constrain( - self.renderable, width if self.width is None else min(width, self.width) - ), - options.update(height=None), - ) - lines = list(Segment.split_lines(rendered)) - width, height = Segment.get_shape(lines) - lines = Segment.set_shape(lines, width, height) - new_line = Segment.line() - excess_space = options.max_width - width - style = console.get_style(self.style) if self.style is not None else None - - def generate_segments() -> Iterable[Segment]: - if excess_space <= 0: - # Exact fit - for line in lines: - yield from line - yield new_line - - elif align == "left": - # Pad on the right - pad = Segment(" " * excess_space, style) if self.pad else None - for line in lines: - yield from line - if pad: - yield pad - yield new_line - - elif align == "center": - # Pad left and right - left = excess_space // 2 - pad = Segment(" " * left, style) - pad_right = ( - Segment(" " * (excess_space - left), style) if self.pad else None - ) - for line in lines: - if left: - yield pad - yield from line - if pad_right: - yield pad_right - yield new_line - - elif align == "right": - # Padding on left - pad = Segment(" " * excess_space, style) - for line in lines: - yield pad - 
yield from line - yield new_line - - blank_line = ( - Segment(f"{' ' * (self.width or options.max_width)}\n", style) - if self.pad - else Segment("\n") - ) - - def blank_lines(count: int) -> Iterable[Segment]: - if count > 0: - for _ in range(count): - yield blank_line - - vertical_height = self.height or options.height - iter_segments: Iterable[Segment] - if self.vertical and vertical_height is not None: - if self.vertical == "top": - bottom_space = vertical_height - height - iter_segments = chain(generate_segments(), blank_lines(bottom_space)) - elif self.vertical == "middle": - top_space = (vertical_height - height) // 2 - bottom_space = vertical_height - top_space - height - iter_segments = chain( - blank_lines(top_space), - generate_segments(), - blank_lines(bottom_space), - ) - else: # self.vertical == "bottom": - top_space = vertical_height - height - iter_segments = chain(blank_lines(top_space), generate_segments()) - else: - iter_segments = generate_segments() - if self.style: - style = console.get_style(self.style) - iter_segments = Segment.apply_style(iter_segments, style) - yield from iter_segments - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> Measurement: - measurement = Measurement.get(console, options, self.renderable) - return measurement - - -class VerticalCenter(JupyterMixin): - """Vertically aligns a renderable. - - Warn: - This class is deprecated and may be removed in a future version. Use Align class with - `vertical="middle"`. - - Args: - renderable (RenderableType): A renderable object. - """ - - def __init__( - self, - renderable: "RenderableType", - style: Optional[StyleType] = None, - ) -> None: - self.renderable = renderable - self.style = style - - def __repr__(self) -> str: - return f"VerticalCenter({self.renderable!r})" - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - style = console.get_style(self.style) if self.style is not None else None - lines = console.render_lines( - self.renderable, options.update(height=None), pad=False - ) - width, _height = Segment.get_shape(lines) - new_line = Segment.line() - height = options.height or options.size.height - top_space = (height - len(lines)) // 2 - bottom_space = height - top_space - len(lines) - blank_line = Segment(f"{' ' * width}", style) - - def blank_lines(count: int) -> Iterable[Segment]: - for _ in range(count): - yield blank_line - yield new_line - - if top_space > 0: - yield from blank_lines(top_space) - for line in lines: - yield from line - yield new_line - if bottom_space > 0: - yield from blank_lines(bottom_space) - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> Measurement: - measurement = Measurement.get(console, options, self.renderable) - return measurement - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich.console import Console, Group - from pip._vendor.rich.highlighter import ReprHighlighter - from pip._vendor.rich.panel import Panel - - highlighter = ReprHighlighter() - console = Console() - - panel = Panel( - Group( - Align.left(highlighter("align='left'")), - Align.center(highlighter("align='center'")), - Align.right(highlighter("align='right'")), - ), - width=60, - style="on dark_blue", - title="Align", - ) - - console.print( - Align.center(panel, vertical="middle", style="on red", height=console.height) - ) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/styled.py 
b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/styled.py deleted file mode 100644 index 91cd0db31c14e30d4c1e2e9f36382b7a5e022870..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/styled.py +++ /dev/null @@ -1,42 +0,0 @@ -from typing import TYPE_CHECKING - -from .measure import Measurement -from .segment import Segment -from .style import StyleType - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderResult, RenderableType - - -class Styled: - """Apply a style to a renderable. - - Args: - renderable (RenderableType): Any renderable. - style (StyleType): A style to apply across the entire renderable. - """ - - def __init__(self, renderable: "RenderableType", style: "StyleType") -> None: - self.renderable = renderable - self.style = style - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - style = console.get_style(self.style) - rendered_segments = console.render(self.renderable, options) - segments = Segment.apply_style(rendered_segments, style) - return segments - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> Measurement: - return Measurement.get(console, options, self.renderable) - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich import print - from pip._vendor.rich.panel import Panel - - panel = Styled(Panel("hello"), "on blue") - print(panel) diff --git a/spaces/Autopixel/blurry-faces/app.py b/spaces/Autopixel/blurry-faces/app.py deleted file mode 100644 index aec60251cf39fc9bbad7ddb0dd3d711ae5e5495a..0000000000000000000000000000000000000000 --- a/spaces/Autopixel/blurry-faces/app.py +++ /dev/null @@ -1,59 +0,0 @@ -import cv2 -import gradio as gr -from typing import Union, Tuple -from PIL import Image, ImageOps -import numpy as np -import torch - -model = torch.jit.load('./model/model.pt').eval() - -def resize_with_padding(img: Image.Image, expected_size: Tuple[int, int]) -> Image.Image: - img.thumbnail((expected_size[0], expected_size[1])) - delta_width = expected_size[0] - img.size[0] - delta_height = expected_size[1] - img.size[1] - pad_width = delta_width // 2 - pad_height = delta_height // 2 - padding = (pad_width, pad_height, delta_width - pad_width, delta_height - pad_height) - return ImageOps.expand(img, padding), padding - -def preprocess_image(img: Image.Image, size: int = 512) -> Tuple[Image.Image, torch.tensor, Tuple[int]]: - pil_img, padding = resize_with_padding(img, (size, size)) - - img = (np.array(pil_img).astype(np.float32) / 255) - np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 1, 3) - img = img / np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 1, 3) - img = np.transpose(img, (2, 0, 1)) - - return pil_img, torch.tensor(img[None]), padding - -def soft_blur_with_mask(image: Image.Image, mask: torch.tensor, padding: Tuple[int]) -> Image.Image: - image = np.array(image) - # Create a blurred copy of the original image. 
- blurred_image = cv2.GaussianBlur(image, (221, 221), sigmaX=20, sigmaY=20) - image_height, image_width = image.shape[:2] - mask = cv2.resize(mask.astype(np.uint8), (image_width, image_height), interpolation=cv2.INTER_NEAREST) - # Blurring the mask itself to get a softer mask with no firm edges - mask = cv2.GaussianBlur(mask.astype(np.float32), (11, 11), 10, 10)[:, :, None] - - # Take the blurred image where the mask it positive, and the original image where the image is original - image = (mask * blurred_image + (1.0 - mask) * image) - pad_w, pad_h, _, _ = padding - img_w, img_h, _ = image.shape - image = image[(pad_h):(img_h-pad_h), (pad_w):(img_w-pad_w), :] - return Image.fromarray(image.astype(np.uint8)) - -def run(image, size): - pil_image, torch_image, padding = preprocess_image(image, size=size) - - with torch.inference_mode(): - mask = model(torch_image) - mask = mask.argmax(dim=1).numpy().squeeze() - - return soft_blur_with_mask(pil_image, mask, padding) - -content_image_input = gr.inputs.Image(label="Entrada", type="pil") -model_image_size = gr.inputs.Radio([256, 384, 512, 1024], type="value", default=512, label="Ajustar nivel de inferencia") - -app_interface = gr.Interface(fn=run, - inputs=[content_image_input, model_image_size], - outputs="image") -app_interface.launch() \ No newline at end of file diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/model_zoo/model_zoo.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/model_zoo/model_zoo.py deleted file mode 100644 index 5b90bc9a165ea46ada72ed0e71f1e80e71ea9f40..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/model_zoo/model_zoo.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import os -from typing import Optional -import pkg_resources -import torch - -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate -from detectron2.modeling import build_model - - -class _ModelZooUrls(object): - """ - Mapping from names to officially released Detectron2 pre-trained models. 
- """ - - S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" - - # format: {config_path.yaml} -> model_id/model_final_{commit}.pkl - CONFIG_PATH_TO_URL_SUFFIX = { - # COCO Detection with Faster R-CNN - "COCO-Detection/faster_rcnn_R_50_C4_1x": "137257644/model_final_721ade.pkl", - "COCO-Detection/faster_rcnn_R_50_DC5_1x": "137847829/model_final_51d356.pkl", - "COCO-Detection/faster_rcnn_R_50_FPN_1x": "137257794/model_final_b275ba.pkl", - "COCO-Detection/faster_rcnn_R_50_C4_3x": "137849393/model_final_f97cb7.pkl", - "COCO-Detection/faster_rcnn_R_50_DC5_3x": "137849425/model_final_68d202.pkl", - "COCO-Detection/faster_rcnn_R_50_FPN_3x": "137849458/model_final_280758.pkl", - "COCO-Detection/faster_rcnn_R_101_C4_3x": "138204752/model_final_298dad.pkl", - "COCO-Detection/faster_rcnn_R_101_DC5_3x": "138204841/model_final_3e0943.pkl", - "COCO-Detection/faster_rcnn_R_101_FPN_3x": "137851257/model_final_f6e8b1.pkl", - "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x": "139173657/model_final_68b088.pkl", - # COCO Detection with RetinaNet - "COCO-Detection/retinanet_R_50_FPN_1x": "190397773/model_final_bfca0b.pkl", - "COCO-Detection/retinanet_R_50_FPN_3x": "190397829/model_final_5bd44e.pkl", - "COCO-Detection/retinanet_R_101_FPN_3x": "190397697/model_final_971ab9.pkl", - # COCO Detection with RPN and Fast R-CNN - "COCO-Detection/rpn_R_50_C4_1x": "137258005/model_final_450694.pkl", - "COCO-Detection/rpn_R_50_FPN_1x": "137258492/model_final_02ce48.pkl", - "COCO-Detection/fast_rcnn_R_50_FPN_1x": "137635226/model_final_e5f7ce.pkl", - # COCO Instance Segmentation Baselines with Mask R-CNN - "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x": "137259246/model_final_9243eb.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x": "137260150/model_final_4f86c3.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "137260431/model_final_a54504.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x": "137849525/model_final_4ce675.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x": "137849551/model_final_84107b.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x": "137849600/model_final_f10217.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x": "138363239/model_final_a2914c.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x": "138363294/model_final_0464b7.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x": "138205316/model_final_a3ec72.pkl", - "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x": "139653917/model_final_2d9806.pkl", # noqa - # New baselines using Large-Scale Jitter and Longer Training Schedule - "new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ": "42047764/model_final_bb69de.pkl", - "new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ": "42047638/model_final_89a8d3.pkl", - "new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ": "42019571/model_final_14d201.pkl", - "new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ": "42025812/model_final_4f7b58.pkl", - "new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ": "42131867/model_final_0bb7ae.pkl", - "new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ": "42073830/model_final_f96b26.pkl", - "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ": "42047771/model_final_b7fbab.pkl", # noqa - "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ": "42132721/model_final_5d87c1.pkl", # noqa - "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ": "42025447/model_final_f1362d.pkl", # noqa - "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ": "42047784/model_final_6ba57e.pkl", # noqa - "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ": 
"42047642/model_final_27b9c1.pkl", # noqa - "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ": "42045954/model_final_ef3a80.pkl", # noqa - # COCO Person Keypoint Detection Baselines with Keypoint R-CNN - "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x": "137261548/model_final_04e291.pkl", - "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x": "137849621/model_final_a6e10b.pkl", - "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x": "138363331/model_final_997cc7.pkl", - "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x": "139686956/model_final_5ad38f.pkl", - # COCO Panoptic Segmentation Baselines with Panoptic FPN - "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x": "139514544/model_final_dbfeb4.pkl", - "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x": "139514569/model_final_c10459.pkl", - "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x": "139514519/model_final_cafdb1.pkl", - # LVIS Instance Segmentation Baselines with Mask R-CNN - "LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "144219072/model_final_571f7c.pkl", # noqa - "LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x": "144219035/model_final_824ab5.pkl", # noqa - "LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x": "144219108/model_final_5e3439.pkl", # noqa - # Cityscapes & Pascal VOC Baselines - "Cityscapes/mask_rcnn_R_50_FPN": "142423278/model_final_af9cf5.pkl", - "PascalVOC-Detection/faster_rcnn_R_50_C4": "142202221/model_final_b1acc2.pkl", - # Other Settings - "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5": "138602867/model_final_65c703.pkl", - "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5": "144998336/model_final_821d0b.pkl", - "Misc/cascade_mask_rcnn_R_50_FPN_1x": "138602847/model_final_e9d89b.pkl", - "Misc/cascade_mask_rcnn_R_50_FPN_3x": "144998488/model_final_480dd8.pkl", - "Misc/mask_rcnn_R_50_FPN_3x_syncbn": "169527823/model_final_3b3c51.pkl", - "Misc/mask_rcnn_R_50_FPN_3x_gn": "138602888/model_final_dc5d9e.pkl", - "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn": "138602908/model_final_01ca85.pkl", - "Misc/scratch_mask_rcnn_R_50_FPN_9x_gn": "183808979/model_final_da7b4c.pkl", - "Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn": "184226666/model_final_5ce33e.pkl", - "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x": "139797668/model_final_be35db.pkl", - "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv": "18131413/model_0039999_e76410.pkl", # noqa - # D1 Comparisons - "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x": "137781054/model_final_7ab50c.pkl", # noqa - "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x": "137781281/model_final_62ca52.pkl", # noqa - "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x": "137781195/model_final_cce136.pkl", - } - - @staticmethod - def query(config_path: str) -> Optional[str]: - """ - Args: - config_path: relative config filename - """ - name = config_path.replace(".yaml", "").replace(".py", "") - if name in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX: - suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[name] - return _ModelZooUrls.S3_PREFIX + name + "/" + suffix - return None - - -def get_checkpoint_url(config_path): - """ - Returns the URL to the model trained using the given config - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - - Returns: - str: a URL to the model - """ - url = _ModelZooUrls.query(config_path) - if url is None: - raise RuntimeError("Pretrained model for {} is not available!".format(config_path)) - return url - - -def get_config_file(config_path): - """ - Returns path to a builtin 
config file. - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - - Returns: - str: the real path to the config file. - """ - cfg_file = pkg_resources.resource_filename( - "detectron2.model_zoo", os.path.join("configs", config_path) - ) - if not os.path.exists(cfg_file): - raise RuntimeError("{} not available in Model Zoo!".format(config_path)) - return cfg_file - - -def get_config(config_path, trained: bool = False): - """ - Returns a config object for a model in model zoo. - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights. - If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used - instead; this will typically (though not always) initialize a subset of weights using - an ImageNet pre-trained model, while randomly initializing the other weights. - - Returns: - CfgNode or omegaconf.DictConfig: a config object - """ - cfg_file = get_config_file(config_path) - if cfg_file.endswith(".yaml"): - cfg = get_cfg() - cfg.merge_from_file(cfg_file) - if trained: - cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path) - return cfg - elif cfg_file.endswith(".py"): - cfg = LazyConfig.load(cfg_file) - if trained: - url = get_checkpoint_url(config_path) - if "train" in cfg and "init_checkpoint" in cfg.train: - cfg.train.init_checkpoint = url - else: - raise NotImplementedError - return cfg - - -def get(config_path, trained: bool = False, device: Optional[str] = None): - """ - Get a model specified by relative path under Detectron2's official ``configs/`` directory. - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - trained (bool): see :func:`get_config`. - device (str or None): overwrite the device in config, if given. - - Returns: - nn.Module: a detectron2 model. Will be in training mode. 
- - Example: - :: - from detectron2 import model_zoo - model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True) - """ - cfg = get_config(config_path, trained) - if device is None and not torch.cuda.is_available(): - device = "cpu" - if device is not None and isinstance(cfg, CfgNode): - cfg.MODEL.DEVICE = device - - if isinstance(cfg, CfgNode): - model = build_model(cfg) - DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) - else: - model = instantiate(cfg.model) - if device is not None: - model = model.to(device) - if "train" in cfg and "init_checkpoint" in cfg.train: - DetectionCheckpointer(model).load(cfg.train.init_checkpoint) - return model diff --git a/spaces/Banbri/zcvzcv/src/lib/replaceWhiteWithTransparent.ts b/spaces/Banbri/zcvzcv/src/lib/replaceWhiteWithTransparent.ts deleted file mode 100644 index cee490fc1a0b19b2192ce86d6c8f9867a3a6a6d9..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/lib/replaceWhiteWithTransparent.ts +++ /dev/null @@ -1,37 +0,0 @@ -export function replaceWhiteWithTransparent(imageBase64: string): Promise { - return new Promise((resolve, reject) => { - const img = new Image(); - img.onload = () => { - const canvas = document.createElement('canvas'); - canvas.width = img.width; - canvas.height = img.height; - - const ctx = canvas.getContext('2d'); - if (!ctx) { - reject('Unable to get canvas 2D context'); - return; - } - - ctx.drawImage(img, 0, 0); - - const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height); - const data = imageData.data; - - for (let i = 0; i < data.length; i += 4) { - if (data[i] === 255 && data[i + 1] === 255 && data[i + 2] === 255) { - data[i + 3] = 0; - } - } - - ctx.putImageData(imageData, 0, 0); - - resolve(canvas.toDataURL()); - }; - - img.onerror = (err) => { - reject(err); - }; - - img.src = imageBase64; - }); -} \ No newline at end of file diff --git a/spaces/BigSalmon/GPT2Mask/app.py b/spaces/BigSalmon/GPT2Mask/app.py deleted file mode 100644 index 6b62f405dad179d361ec33f607cbd97e88edc9f6..0000000000000000000000000000000000000000 --- a/spaces/BigSalmon/GPT2Mask/app.py +++ /dev/null @@ -1,68 +0,0 @@ -import streamlit as st -import numpy as np -import pandas as pd -import os -import torch -import torch.nn as nn -from transformers.activations import get_activation -from transformers import AutoTokenizer, AutoModelForCausalLM -from transformers import AutoTokenizer, AutoModel -from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast - - -st.title('GPT2:') - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -@st.cache(allow_output_mutation=True) -def get_model(): - #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/MASKGPT2") - #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MASKGPT2") - #tokenizer = GPTNeoXTokenizerFast.from_pretrained("CarperAI/FIM-NeoX-1.3B") - #model = GPTNeoXForCausalLM.from_pretrained("BigSalmon/FormalInformalConcise-FIM-NeoX-1.3B") - tokenizer = AutoTokenizer.from_pretrained("BigSalmon/FamilyFeud") - model = AutoModelForCausalLM.from_pretrained("BigSalmon/FamilyFeud") - return model, tokenizer - -model, tokenizer = get_model() - -g = """*** - -original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. -infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. 
- -*** - -original:""" - -def prefix_format(sentence): - words = sentence.split() - if "[MASK]" in sentence: - words2 = words.index("[MASK]") - #print(words2) - output = ("<|SUF|> " + ' '.join(words[words2+1:]) + " <|PRE|> " + ' '.join(words[:words2]) + " <|MID|>") - st.write(output) - else: - st.write("Add [MASK] to sentence") - -with st.form(key='my_form'): - prompt = st.text_area(label='Enter sentence', value=g) - submit_button = st.form_submit_button(label='Submit') - submit_button6 = st.form_submit_button(label='Turn Into Infill Format. Just add [MASK] where you want it infilled') - if submit_button: - with torch.no_grad(): - text = tokenizer.encode(prompt) - myinput, past_key_values = torch.tensor([text]), None - myinput = myinput - myinput= myinput - logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False) - logits = logits[0,-1] - probabilities = torch.nn.functional.softmax(logits) - best_logits, best_indices = logits.topk(250) - best_words = [tokenizer.decode([idx.item()]) for idx in best_indices] - text.append(best_indices[0].item()) - best_probabilities = probabilities[best_indices].tolist() - words = [] - st.write(best_words) - if submit_button6: - prefix_format(prompt) \ No newline at end of file diff --git a/spaces/CVH-vn1210/make_hair/minigpt4/tasks/image_text_pretrain.py b/spaces/CVH-vn1210/make_hair/minigpt4/tasks/image_text_pretrain.py deleted file mode 100644 index a2214a2e887799fa5236f165ac7329b60bc81d8f..0000000000000000000000000000000000000000 --- a/spaces/CVH-vn1210/make_hair/minigpt4/tasks/image_text_pretrain.py +++ /dev/null @@ -1,18 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -from minigpt4.common.registry import registry -from minigpt4.tasks.base_task import BaseTask - - -@registry.register_task("image_text_pretrain") -class ImageTextPretrainTask(BaseTask): - def __init__(self): - super().__init__() - - def evaluation(self, model, data_loader, cuda_enabled=True): - pass diff --git a/spaces/CVPR/WALT/mmdet/core/bbox/coder/__init__.py b/spaces/CVPR/WALT/mmdet/core/bbox/coder/__init__.py deleted file mode 100644 index ae455ba8fc0e0727e2d581cdc8f20fceededf99a..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/core/bbox/coder/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .base_bbox_coder import BaseBBoxCoder -from .bucketing_bbox_coder import BucketingBBoxCoder -from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder -from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder -from .pseudo_bbox_coder import PseudoBBoxCoder -from .tblr_bbox_coder import TBLRBBoxCoder -from .yolo_bbox_coder import YOLOBBoxCoder - -__all__ = [ - 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', - 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder', - 'BucketingBBoxCoder' -] diff --git a/spaces/CVPR/WALT/mmdet/models/roi_heads/dynamic_roi_head.py b/spaces/CVPR/WALT/mmdet/models/roi_heads/dynamic_roi_head.py deleted file mode 100644 index 89427a931f45f5a920c0e66fd88058bf9fa05f5c..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/roi_heads/dynamic_roi_head.py +++ /dev/null @@ -1,154 +0,0 @@ -import numpy as np -import torch - -from mmdet.core import bbox2roi -from mmdet.models.losses import SmoothL1Loss -from ..builder import HEADS -from .standard_roi_head import StandardRoIHead - -EPS = 
1e-15 - - -@HEADS.register_module() -class DynamicRoIHead(StandardRoIHead): - """RoI head for `Dynamic R-CNN `_.""" - - def __init__(self, **kwargs): - super(DynamicRoIHead, self).__init__(**kwargs) - assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss) - # the IoU history of the past `update_iter_interval` iterations - self.iou_history = [] - # the beta history of the past `update_iter_interval` iterations - self.beta_history = [] - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None): - """Forward function for training. - - Args: - x (list[Tensor]): list of multi-level img features. - - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - - proposals (list[Tensors]): list of region proposals. - - gt_bboxes (list[Tensor]): each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - - gt_labels (list[Tensor]): class indices corresponding to each box - - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # assign gts and sample proposals - if self.with_bbox or self.with_mask: - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - sampling_results = [] - cur_iou = [] - for i in range(num_imgs): - assign_result = self.bbox_assigner.assign( - proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], - gt_labels[i]) - sampling_result = self.bbox_sampler.sample( - assign_result, - proposal_list[i], - gt_bboxes[i], - gt_labels[i], - feats=[lvl_feat[i][None] for lvl_feat in x]) - # record the `iou_topk`-th largest IoU in an image - iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk, - len(assign_result.max_overlaps)) - ious, _ = torch.topk(assign_result.max_overlaps, iou_topk) - cur_iou.append(ious[-1].item()) - sampling_results.append(sampling_result) - # average the current IoUs over images - cur_iou = np.mean(cur_iou) - self.iou_history.append(cur_iou) - - losses = dict() - # bbox head forward and loss - if self.with_bbox: - bbox_results = self._bbox_forward_train(x, sampling_results, - gt_bboxes, gt_labels, - img_metas) - losses.update(bbox_results['loss_bbox']) - - # mask head forward and loss - if self.with_mask: - mask_results = self._mask_forward_train(x, sampling_results, - bbox_results['bbox_feats'], - gt_masks, img_metas) - losses.update(mask_results['loss_mask']) - - # update IoU threshold and SmoothL1 beta - update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval - if len(self.iou_history) % update_iter_interval == 0: - new_iou_thr, new_beta = self.update_hyperparameters() - - return losses - - def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, - img_metas): - num_imgs = len(img_metas) - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward(x, rois) - - bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, self.train_cfg) - # record the `beta_topk`-th smallest target - # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets - 
# and bbox_weights, respectively - pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1) - num_pos = len(pos_inds) - cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1) - beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs, - num_pos) - cur_target = torch.kthvalue(cur_target, beta_topk)[0].item() - self.beta_history.append(cur_target) - loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - - bbox_results.update(loss_bbox=loss_bbox) - return bbox_results - - def update_hyperparameters(self): - """Update hyperparameters like IoU thresholds for assigner and beta for - SmoothL1 loss based on the training statistics. - - Returns: - tuple[float]: the updated ``iou_thr`` and ``beta``. - """ - new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou, - np.mean(self.iou_history)) - self.iou_history = [] - self.bbox_assigner.pos_iou_thr = new_iou_thr - self.bbox_assigner.neg_iou_thr = new_iou_thr - self.bbox_assigner.min_pos_iou = new_iou_thr - if (np.median(self.beta_history) < EPS): - # avoid 0 or too small value for new_beta - new_beta = self.bbox_head.loss_bbox.beta - else: - new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta, - np.median(self.beta_history)) - self.beta_history = [] - self.bbox_head.loss_bbox.beta = new_beta - return new_iou_thr, new_beta diff --git a/spaces/ChrisPreston/diff-svc_minato_aqua/modules/nsf_hifigan/env.py b/spaces/ChrisPreston/diff-svc_minato_aqua/modules/nsf_hifigan/env.py deleted file mode 100644 index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000 --- a/spaces/ChrisPreston/diff-svc_minato_aqua/modules/nsf_hifigan/env.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/CosmicSage/Linaqruf-anything-v3.0/app.py b/spaces/CosmicSage/Linaqruf-anything-v3.0/app.py deleted file mode 100644 index 16e8131a0bbf7b06956e69e2b7758fa01e4eb51f..0000000000000000000000000000000000000000 --- a/spaces/CosmicSage/Linaqruf-anything-v3.0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Linaqruf/anything-v3.0").launch() \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/D_S_I_G_.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/D_S_I_G_.py deleted file mode 100644 index d902a29080aff5a275f530c7658d3c9eb4498034..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/D_S_I_G_.py +++ /dev/null @@ -1,151 +0,0 @@ -from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval -from fontTools.misc import sstruct -from . 
import DefaultTable -import base64 - -DSIG_HeaderFormat = """ - > # big endian - ulVersion: L - usNumSigs: H - usFlag: H -""" -# followed by an array of usNumSigs DSIG_Signature records -DSIG_SignatureFormat = """ - > # big endian - ulFormat: L - ulLength: L # length includes DSIG_SignatureBlock header - ulOffset: L -""" -# followed by an array of usNumSigs DSIG_SignatureBlock records, -# each followed immediately by the pkcs7 bytes -DSIG_SignatureBlockFormat = """ - > # big endian - usReserved1: H - usReserved2: H - cbSignature: l # length of following raw pkcs7 data -""" - -# -# NOTE -# the DSIG table format allows for SignatureBlocks residing -# anywhere in the table and possibly in a different order as -# listed in the array after the first table header -# -# this implementation does not keep track of any gaps and/or data -# before or after the actual signature blocks while decompiling, -# and puts them in the same physical order as listed in the header -# on compilation with no padding whatsoever. -# - - -class table_D_S_I_G_(DefaultTable.DefaultTable): - def decompile(self, data, ttFont): - dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self) - assert self.ulVersion == 1, "DSIG ulVersion must be 1" - assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0" - self.signatureRecords = sigrecs = [] - for n in range(self.usNumSigs): - sigrec, newData = sstruct.unpack2( - DSIG_SignatureFormat, newData, SignatureRecord() - ) - assert sigrec.ulFormat == 1, ( - "DSIG signature record #%d ulFormat must be 1" % n - ) - sigrecs.append(sigrec) - for sigrec in sigrecs: - dummy, newData = sstruct.unpack2( - DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec - ) - assert sigrec.usReserved1 == 0, ( - "DSIG signature record #%d usReserverd1 must be 0" % n - ) - assert sigrec.usReserved2 == 0, ( - "DSIG signature record #%d usReserverd2 must be 0" % n - ) - sigrec.pkcs7 = newData[: sigrec.cbSignature] - - def compile(self, ttFont): - packed = sstruct.pack(DSIG_HeaderFormat, self) - headers = [packed] - offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat) - data = [] - for sigrec in self.signatureRecords: - # first pack signature block - sigrec.cbSignature = len(sigrec.pkcs7) - packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7 - data.append(packed) - # update redundant length field - sigrec.ulLength = len(packed) - # update running table offset - sigrec.ulOffset = offset - headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec)) - offset += sigrec.ulLength - if offset % 2: - # Pad to even bytes - data.append(b"\0") - return bytesjoin(headers + data) - - def toXML(self, xmlWriter, ttFont): - xmlWriter.comment( - "note that the Digital Signature will be invalid after recompilation!" 
- ) - xmlWriter.newline() - xmlWriter.simpletag( - "tableHeader", - version=self.ulVersion, - numSigs=self.usNumSigs, - flag="0x%X" % self.usFlag, - ) - for sigrec in self.signatureRecords: - xmlWriter.newline() - sigrec.toXML(xmlWriter, ttFont) - xmlWriter.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "tableHeader": - self.signatureRecords = [] - self.ulVersion = safeEval(attrs["version"]) - self.usNumSigs = safeEval(attrs["numSigs"]) - self.usFlag = safeEval(attrs["flag"]) - return - if name == "SignatureRecord": - sigrec = SignatureRecord() - sigrec.fromXML(name, attrs, content, ttFont) - self.signatureRecords.append(sigrec) - - -pem_spam = lambda l, spam={ - "-----BEGIN PKCS7-----": True, - "-----END PKCS7-----": True, - "": True, -}: not spam.get(l.strip()) - - -def b64encode(b): - s = base64.b64encode(b) - # Line-break at 76 chars. - items = [] - while s: - items.append(tostr(s[:76])) - items.append("\n") - s = s[76:] - return strjoin(items) - - -class SignatureRecord(object): - def __repr__(self): - return "<%s: %s>" % (self.__class__.__name__, self.__dict__) - - def toXML(self, writer, ttFont): - writer.begintag(self.__class__.__name__, format=self.ulFormat) - writer.newline() - writer.write_noindent("-----BEGIN PKCS7-----\n") - writer.write_noindent(b64encode(self.pkcs7)) - writer.write_noindent("-----END PKCS7-----\n") - writer.endtag(self.__class__.__name__) - - def fromXML(self, name, attrs, content, ttFont): - self.ulFormat = safeEval(attrs["format"]) - self.usReserved1 = safeEval(attrs.get("reserved1", "0")) - self.usReserved2 = safeEval(attrs.get("reserved2", "0")) - self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content)))) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/themes/base.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/themes/base.py deleted file mode 100644 index 5306b4f3e26d83bd84aa8555485292598e65b3f8..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/themes/base.py +++ /dev/null @@ -1,1815 +0,0 @@ -from __future__ import annotations - -import json -import re -import tempfile -import textwrap -from pathlib import Path -from typing import Iterable - -import huggingface_hub -import requests -import semantic_version as semver -from gradio_client.documentation import document, set_documentation_group -from huggingface_hub import CommitOperationAdd - -from gradio.themes.utils import ( - colors, - fonts, - get_matching_version, - get_theme_assets, - sizes, -) -from gradio.themes.utils.readme_content import README_CONTENT - -set_documentation_group("themes") - - -class ThemeClass: - def __init__(self): - self._stylesheets = [] - self.name = None - - def _get_theme_css(self): - css = {} - dark_css = {} - - for attr, val in self.__dict__.items(): - if attr.startswith("_"): - continue - if val is None: - if attr.endswith("_dark"): - dark_css[attr[:-5]] = None - continue - else: - raise ValueError( - f"Cannot set '{attr}' to None - only dark mode variables can be None." - ) - val = str(val) - pattern = r"(\*)([\w_]+)(\b)" - - def repl_func(match): - full_match = match.group(0) - if full_match.startswith("*") and full_match.endswith("_dark"): - raise ValueError( - f"Cannot refer '{attr}' to '{val}' - dark variable references are automatically used for dark mode attributes, so do not use the _dark suffix in the value." 
- ) - if ( - attr.endswith("_dark") - and full_match.startswith("*") - and attr[:-5] == full_match[1:] - ): - raise ValueError( - f"Cannot refer '{attr}' to '{val}' - if dark and light mode values are the same, set dark mode version to None." - ) - - word = match.group(2) - word = word.replace("_", "-") - return f"var(--{word})" - - val = re.sub(pattern, repl_func, val) - - attr = attr.replace("_", "-") - - if attr.endswith("-dark"): - attr = attr[:-5] - dark_css[attr] = val - else: - css[attr] = val - - for attr, val in css.items(): - if attr not in dark_css: - dark_css[attr] = val - - css_code = ( - ":root {\n" - + "\n".join([f" --{attr}: {val};" for attr, val in css.items()]) - + "\n}" - ) - dark_css_code = ( - ".dark {\n" - + "\n".join([f" --{attr}: {val};" for attr, val in dark_css.items()]) - + "\n}" - ) - - return f"{css_code}\n{dark_css_code}" - - def to_dict(self): - """Convert the theme into a python dictionary.""" - schema = {"theme": {}} - for prop in dir(self): - if ( - not prop.startswith("_") - or prop.startswith("_font") - or prop == "_stylesheets" - or prop == "name" - ) and isinstance(getattr(self, prop), (list, str)): - schema["theme"][prop] = getattr(self, prop) - return schema - - @classmethod - def load(cls, path: str) -> ThemeClass: - """Load a theme from a json file. - - Parameters: - path: The filepath to read. - """ - with open(path) as fp: - return cls.from_dict(json.load(fp, object_hook=fonts.as_font)) - - @classmethod - def from_dict(cls, theme: dict[str, dict[str, str]]) -> ThemeClass: - """Create a theme instance from a dictionary representation. - - Parameters: - theme: The dictionary representation of the theme. - """ - new_theme = cls() - for prop, value in theme["theme"].items(): - setattr(new_theme, prop, value) - - # For backwards compatibility, load attributes in base theme not in the loaded theme from the base theme. - base = Base() - for attr in base.__dict__: - if not attr.startswith("_") and not hasattr(new_theme, attr): - setattr(new_theme, attr, getattr(base, attr)) - - return new_theme - - def dump(self, filename: str): - """Write the theme to a json file. - - Parameters: - filename: The path to write the theme too - """ - Path(filename).write_text(json.dumps(self.to_dict(), cls=fonts.FontEncoder)) - - @classmethod - def from_hub(cls, repo_name: str, hf_token: str | None = None): - """Load a theme from the hub. - - This DOES NOT require a HuggingFace account for downloading publicly available themes. - - Parameters: - repo_name: string of the form /@. If a semantic version expression is omitted, the latest version will be fetched. - hf_token: HuggingFace Token. Only needed to download private themes. 
- """ - if "@" not in repo_name: - name, version = repo_name, None - else: - name, version = repo_name.split("@") - - api = huggingface_hub.HfApi(token=hf_token) - - try: - space_info = api.space_info(name) - except requests.HTTPError as e: - raise ValueError(f"The space {name} does not exist") from e - - assets = get_theme_assets(space_info) - matching_version = get_matching_version(assets, version) - - if not matching_version: - raise ValueError( - f"Cannot find a matching version for expression {version} " - f"from files {[f.filename for f in assets]}" - ) - - theme_file = huggingface_hub.hf_hub_download( - repo_id=name, - repo_type="space", - filename=f"themes/theme_schema@{matching_version.version}.json", - ) - theme = cls.load(theme_file) - theme.name = name - return theme - - @staticmethod - def _get_next_version(space_info: huggingface_hub.hf_api.SpaceInfo) -> str: - assets = get_theme_assets(space_info) - latest_version = max(assets, key=lambda asset: asset.version).version - return str(latest_version.next_patch()) - - @staticmethod - def _theme_version_exists( - space_info: huggingface_hub.hf_api.SpaceInfo, version: str - ) -> bool: - assets = get_theme_assets(space_info) - return any(a.version == semver.Version(version) for a in assets) - - def push_to_hub( - self, - repo_name: str, - org_name: str | None = None, - version: str | None = None, - hf_token: str | None = None, - theme_name: str | None = None, - description: str | None = None, - private: bool = False, - ): - """Upload a theme to the HuggingFace hub. - - This requires a HuggingFace account. - - Parameters: - repo_name: The name of the repository to store the theme assets, e.g. 'my_theme' or 'sunset'. - org_name: The name of the org to save the space in. If None (the default), the username corresponding to the logged in user, or hƒ_token is used. - version: A semantic version tag for theme. Bumping the version tag lets you publish updates to a theme without changing the look of applications that already loaded your theme. - hf_token: API token for your HuggingFace account - theme_name: Name for the name. If None, defaults to repo_name - description: A long form description to your theme. - """ - - from gradio import __version__ - - api = huggingface_hub.HfApi() - - if not hf_token: - try: - author = huggingface_hub.whoami()["name"] - except OSError as e: - raise ValueError( - "In order to push to hub, log in via `huggingface-cli login` " - "or provide a theme_token to push_to_hub. For more information " - "see https://huggingface.co/docs/huggingface_hub/quick-start#login" - ) from e - else: - author = huggingface_hub.whoami(token=hf_token)["name"] - - space_id = f"{org_name or author}/{repo_name}" - - try: - space_info = api.space_info(space_id) - except requests.HTTPError: - space_info = None - - space_exists = space_info is not None - - # If no version, set the version to next patch release - if not version: - version = self._get_next_version(space_info) if space_exists else "0.0.1" - else: - _ = semver.Version(version) - - if space_exists and self._theme_version_exists(space_info, version): - raise ValueError( - f"The space {space_id} already has a " - f"theme with version {version}. See: themes/theme_schema@{version}.json. " - "To manually override this version, use the HuggingFace hub UI." 
- ) - - theme_name = theme_name or repo_name - - with tempfile.NamedTemporaryFile( - mode="w", delete=False, suffix=".json" - ) as css_file: - contents = self.to_dict() - contents["version"] = version - json.dump(contents, css_file, cls=fonts.FontEncoder) - with tempfile.NamedTemporaryFile(mode="w", delete=False) as readme_file: - readme_content = README_CONTENT.format( - theme_name=theme_name, - description=description or "Add a description of this theme here!", - author=author, - gradio_version=__version__, - ) - readme_file.write(textwrap.dedent(readme_content)) - with tempfile.NamedTemporaryFile(mode="w", delete=False) as app_file: - contents = (Path(__file__).parent / "app.py").read_text() - contents = re.sub( - r"theme=gr.themes.Default\(\)", - f"theme='{space_id}'", - contents, - ) - contents = re.sub(r"{THEME}", theme_name or repo_name, contents) - contents = re.sub(r"{AUTHOR}", org_name or author, contents) - contents = re.sub(r"{SPACE_NAME}", repo_name, contents) - app_file.write(contents) - - operations = [ - CommitOperationAdd( - path_in_repo=f"themes/theme_schema@{version}.json", - path_or_fileobj=css_file.name, - ), - CommitOperationAdd( - path_in_repo="README.md", path_or_fileobj=readme_file.name - ), - CommitOperationAdd(path_in_repo="app.py", path_or_fileobj=app_file.name), - ] - - huggingface_hub.create_repo( - space_id, - repo_type="space", - space_sdk="gradio", - token=hf_token, - exist_ok=True, - private=private, - ) - - api.create_commit( - repo_id=space_id, - commit_message="Updating theme", - repo_type="space", - operations=operations, - token=hf_token, - ) - url = f"https://huggingface.co/spaces/{space_id}" - print(f"See your theme here! {url}") - return url - - -@document("push_to_hub", "from_hub", "load", "dump", "from_dict", "to_dict") -class Base(ThemeClass): - def __init__( - self, - *, - primary_hue: colors.Color | str = colors.blue, - secondary_hue: colors.Color | str = colors.blue, - neutral_hue: colors.Color | str = colors.gray, - text_size: sizes.Size | str = sizes.text_md, - spacing_size: sizes.Size | str = sizes.spacing_md, - radius_size: sizes.Size | str = sizes.radius_md, - font: fonts.Font - | str - | Iterable[fonts.Font | str] = ( - fonts.GoogleFont("Source Sans Pro"), - "ui-sans-serif", - "system-ui", - "sans-serif", - ), - font_mono: fonts.Font - | str - | Iterable[fonts.Font | str] = ( - fonts.GoogleFont("IBM Plex Mono"), - "ui-monospace", - "Consolas", - "monospace", - ), - ): - """ - Parameters: - primary_hue: The primary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string "green"), or pass your own gradio.themes.utils.Color object. - secondary_hue: The secondary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string "green"), or pass your own gradio.themes.utils.Color object. - neutral_hue: The neutral hue of the theme, used . Load a preset, like gradio.themes.colors.green (or just the string "green"), or pass your own gradio.themes.utils.Color object. - text_size: The size of the text. Load a preset, like gradio.themes.sizes.text_sm (or just the string "sm"), or pass your own gradio.themes.utils.Size object. - spacing_size: The size of the spacing. Load a preset, like gradio.themes.sizes.spacing_sm (or just the string "sm"), or pass your own gradio.themes.utils.Size object. - radius_size: The radius size of corners. Load a preset, like gradio.themes.sizes.radius_sm (or just the string "sm"), or pass your own gradio.themes.utils.Size object. 
- font: The primary font to use for the theme. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks. - font_mono: The monospace font to use for the theme, applies to code. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks. - """ - - self.name = "base" - - def expand_shortcut(shortcut, mode="color", prefix=None): - if not isinstance(shortcut, str): - return shortcut - if mode == "color": - for color in colors.Color.all: - if color.name == shortcut: - return color - raise ValueError(f"Color shortcut {shortcut} not found.") - elif mode == "size": - for size in sizes.Size.all: - if size.name == f"{prefix}_{shortcut}": - return size - raise ValueError(f"Size shortcut {shortcut} not found.") - - primary_hue = expand_shortcut(primary_hue, mode="color") - secondary_hue = expand_shortcut(secondary_hue, mode="color") - neutral_hue = expand_shortcut(neutral_hue, mode="color") - text_size = expand_shortcut(text_size, mode="size", prefix="text") - spacing_size = expand_shortcut(spacing_size, mode="size", prefix="spacing") - radius_size = expand_shortcut(radius_size, mode="size", prefix="radius") - - # Hue ranges - self.primary_50 = primary_hue.c50 - self.primary_100 = primary_hue.c100 - self.primary_200 = primary_hue.c200 - self.primary_300 = primary_hue.c300 - self.primary_400 = primary_hue.c400 - self.primary_500 = primary_hue.c500 - self.primary_600 = primary_hue.c600 - self.primary_700 = primary_hue.c700 - self.primary_800 = primary_hue.c800 - self.primary_900 = primary_hue.c900 - self.primary_950 = primary_hue.c950 - - self.secondary_50 = secondary_hue.c50 - self.secondary_100 = secondary_hue.c100 - self.secondary_200 = secondary_hue.c200 - self.secondary_300 = secondary_hue.c300 - self.secondary_400 = secondary_hue.c400 - self.secondary_500 = secondary_hue.c500 - self.secondary_600 = secondary_hue.c600 - self.secondary_700 = secondary_hue.c700 - self.secondary_800 = secondary_hue.c800 - self.secondary_900 = secondary_hue.c900 - self.secondary_950 = secondary_hue.c950 - - self.neutral_50 = neutral_hue.c50 - self.neutral_100 = neutral_hue.c100 - self.neutral_200 = neutral_hue.c200 - self.neutral_300 = neutral_hue.c300 - self.neutral_400 = neutral_hue.c400 - self.neutral_500 = neutral_hue.c500 - self.neutral_600 = neutral_hue.c600 - self.neutral_700 = neutral_hue.c700 - self.neutral_800 = neutral_hue.c800 - self.neutral_900 = neutral_hue.c900 - self.neutral_950 = neutral_hue.c950 - - # Spacing - self.spacing_xxs = spacing_size.xxs - self.spacing_xs = spacing_size.xs - self.spacing_sm = spacing_size.sm - self.spacing_md = spacing_size.md - self.spacing_lg = spacing_size.lg - self.spacing_xl = spacing_size.xl - self.spacing_xxl = spacing_size.xxl - - self.radius_xxs = radius_size.xxs - self.radius_xs = radius_size.xs - self.radius_sm = radius_size.sm - self.radius_md = radius_size.md - self.radius_lg = radius_size.lg - self.radius_xl = radius_size.xl - self.radius_xxl = radius_size.xxl - - self.text_xxs = text_size.xxs - self.text_xs = text_size.xs - self.text_sm = text_size.sm - self.text_md = text_size.md - self.text_lg = text_size.lg - self.text_xl = text_size.xl - self.text_xxl = text_size.xxl - - # Font - if not isinstance(font, Iterable): - font = [font] - self._font = [ - fontfam if isinstance(fontfam, fonts.Font) else fonts.Font(fontfam) - for fontfam in font - ] - if not isinstance(font_mono, Iterable): - 
font_mono = [font_mono] - self._font_mono = [ - fontfam if isinstance(fontfam, fonts.Font) else fonts.Font(fontfam) - for fontfam in font_mono - ] - self.font = ", ".join(str(font) for font in self._font) - self.font_mono = ", ".join(str(font) for font in self._font_mono) - - self._stylesheets = [] - for font in self._font + self._font_mono: - font_stylesheet = font.stylesheet() - if font_stylesheet: - self._stylesheets.append(font_stylesheet) - - self.set() - - def set( - self, - *, - # Body Attributes: These set the values for the entire body of the app. - body_background_fill=None, - body_background_fill_dark=None, - body_text_color=None, - body_text_color_dark=None, - body_text_size=None, - body_text_color_subdued=None, - body_text_color_subdued_dark=None, - body_text_weight=None, - embed_radius=None, - # Element Colors: These set the colors for common elements. - background_fill_primary=None, - background_fill_primary_dark=None, - background_fill_secondary=None, - background_fill_secondary_dark=None, - border_color_accent=None, - border_color_accent_dark=None, - border_color_primary=None, - border_color_primary_dark=None, - color_accent=None, - color_accent_soft=None, - color_accent_soft_dark=None, - # Text: This sets the text styling for text elements. - link_text_color=None, - link_text_color_dark=None, - link_text_color_active=None, - link_text_color_active_dark=None, - link_text_color_hover=None, - link_text_color_hover_dark=None, - link_text_color_visited=None, - link_text_color_visited_dark=None, - prose_text_size=None, - prose_text_weight=None, - prose_header_text_weight=None, - # Shadows: These set the high-level shadow rendering styles. These variables are often referenced by other component-specific shadow variables. - shadow_drop=None, - shadow_drop_lg=None, - shadow_inset=None, - shadow_spread=None, - shadow_spread_dark=None, - # Layout Atoms: These set the style for common layout elements, such as the blocks that wrap components.
- block_background_fill=None, - block_background_fill_dark=None, - block_border_color=None, - block_border_color_dark=None, - block_border_width=None, - block_border_width_dark=None, - block_info_text_color=None, - block_info_text_color_dark=None, - block_info_text_size=None, - block_info_text_weight=None, - block_label_background_fill=None, - block_label_background_fill_dark=None, - block_label_border_color=None, - block_label_border_color_dark=None, - block_label_border_width=None, - block_label_border_width_dark=None, - block_label_shadow=None, - block_label_text_color=None, - block_label_text_color_dark=None, - block_label_margin=None, - block_label_padding=None, - block_label_radius=None, - block_label_right_radius=None, - block_label_text_size=None, - block_label_text_weight=None, - block_padding=None, - block_radius=None, - block_shadow=None, - block_shadow_dark=None, - block_title_background_fill=None, - block_title_background_fill_dark=None, - block_title_border_color=None, - block_title_border_color_dark=None, - block_title_border_width=None, - block_title_border_width_dark=None, - block_title_text_color=None, - block_title_text_color_dark=None, - block_title_padding=None, - block_title_radius=None, - block_title_text_size=None, - block_title_text_weight=None, - container_radius=None, - form_gap_width=None, - layout_gap=None, - panel_background_fill=None, - panel_background_fill_dark=None, - panel_border_color=None, - panel_border_color_dark=None, - panel_border_width=None, - panel_border_width_dark=None, - section_header_text_size=None, - section_header_text_weight=None, - # Component Atoms: These set the style for elements within components. - chatbot_code_background_color=None, - chatbot_code_background_color_dark=None, - checkbox_background_color=None, - checkbox_background_color_dark=None, - checkbox_background_color_focus=None, - checkbox_background_color_focus_dark=None, - checkbox_background_color_hover=None, - checkbox_background_color_hover_dark=None, - checkbox_background_color_selected=None, - checkbox_background_color_selected_dark=None, - checkbox_border_color=None, - checkbox_border_color_dark=None, - checkbox_border_color_focus=None, - checkbox_border_color_focus_dark=None, - checkbox_border_color_hover=None, - checkbox_border_color_hover_dark=None, - checkbox_border_color_selected=None, - checkbox_border_color_selected_dark=None, - checkbox_border_radius=None, - checkbox_border_width=None, - checkbox_border_width_dark=None, - checkbox_check=None, - radio_circle=None, - checkbox_shadow=None, - checkbox_label_background_fill=None, - checkbox_label_background_fill_dark=None, - checkbox_label_background_fill_hover=None, - checkbox_label_background_fill_hover_dark=None, - checkbox_label_background_fill_selected=None, - checkbox_label_background_fill_selected_dark=None, - checkbox_label_border_color=None, - checkbox_label_border_color_dark=None, - checkbox_label_border_color_hover=None, - checkbox_label_border_color_hover_dark=None, - checkbox_label_border_width=None, - checkbox_label_border_width_dark=None, - checkbox_label_gap=None, - checkbox_label_padding=None, - checkbox_label_shadow=None, - checkbox_label_text_size=None, - checkbox_label_text_weight=None, - checkbox_label_text_color=None, - checkbox_label_text_color_dark=None, - checkbox_label_text_color_selected=None, - checkbox_label_text_color_selected_dark=None, - error_background_fill=None, - error_background_fill_dark=None, - error_border_color=None, - error_border_color_dark=None, - 
error_border_width=None, - error_border_width_dark=None, - error_text_color=None, - error_text_color_dark=None, - error_icon_color=None, - error_icon_color_dark=None, - input_background_fill=None, - input_background_fill_dark=None, - input_background_fill_focus=None, - input_background_fill_focus_dark=None, - input_background_fill_hover=None, - input_background_fill_hover_dark=None, - input_border_color=None, - input_border_color_dark=None, - input_border_color_focus=None, - input_border_color_focus_dark=None, - input_border_color_hover=None, - input_border_color_hover_dark=None, - input_border_width=None, - input_border_width_dark=None, - input_padding=None, - input_placeholder_color=None, - input_placeholder_color_dark=None, - input_radius=None, - input_shadow=None, - input_shadow_dark=None, - input_shadow_focus=None, - input_shadow_focus_dark=None, - input_text_size=None, - input_text_weight=None, - loader_color=None, - loader_color_dark=None, - slider_color=None, - slider_color_dark=None, - stat_background_fill=None, - stat_background_fill_dark=None, - table_border_color=None, - table_border_color_dark=None, - table_even_background_fill=None, - table_even_background_fill_dark=None, - table_odd_background_fill=None, - table_odd_background_fill_dark=None, - table_radius=None, - table_row_focus=None, - table_row_focus_dark=None, - # Buttons: These set the style for buttons. - button_border_width=None, - button_border_width_dark=None, - button_shadow=None, - button_shadow_active=None, - button_shadow_hover=None, - button_transition=None, - button_large_padding=None, - button_large_radius=None, - button_large_text_size=None, - button_large_text_weight=None, - button_small_padding=None, - button_small_radius=None, - button_small_text_size=None, - button_small_text_weight=None, - button_primary_background_fill=None, - button_primary_background_fill_dark=None, - button_primary_background_fill_hover=None, - button_primary_background_fill_hover_dark=None, - button_primary_border_color=None, - button_primary_border_color_dark=None, - button_primary_border_color_hover=None, - button_primary_border_color_hover_dark=None, - button_primary_text_color=None, - button_primary_text_color_dark=None, - button_primary_text_color_hover=None, - button_primary_text_color_hover_dark=None, - button_secondary_background_fill=None, - button_secondary_background_fill_dark=None, - button_secondary_background_fill_hover=None, - button_secondary_background_fill_hover_dark=None, - button_secondary_border_color=None, - button_secondary_border_color_dark=None, - button_secondary_border_color_hover=None, - button_secondary_border_color_hover_dark=None, - button_secondary_text_color=None, - button_secondary_text_color_dark=None, - button_secondary_text_color_hover=None, - button_secondary_text_color_hover_dark=None, - button_cancel_background_fill=None, - button_cancel_background_fill_dark=None, - button_cancel_background_fill_hover=None, - button_cancel_background_fill_hover_dark=None, - button_cancel_border_color=None, - button_cancel_border_color_dark=None, - button_cancel_border_color_hover=None, - button_cancel_border_color_hover_dark=None, - button_cancel_text_color=None, - button_cancel_text_color_dark=None, - button_cancel_text_color_hover=None, - button_cancel_text_color_hover_dark=None, - ) -> Base: - """ - Parameters: - body_background_fill: The background of the entire app. - body_background_fill_dark: The background of the entire app in dark mode. - body_text_color: The default text color. 
- body_text_color_dark: The default text color in dark mode. - body_text_size: The default text size. - body_text_color_subdued: The text color used for softer, less important text. - body_text_color_subdued_dark: The text color used for softer, less important text in dark mode. - body_text_weight: The default text weight. - embed_radius: The corner radius used for embedding when the app is embedded within a page. - background_fill_primary: The background primarily used for items placed directly on the page. - background_fill_primary_dark: The background primarily used for items placed directly on the page in dark mode. - background_fill_secondary: The background primarily used for items placed on top of another item. - background_fill_secondary_dark: The background primarily used for items placed on top of another item in dark mode. - border_color_accent: The border color used for accented items. - border_color_accent_dark: The border color used for accented items in dark mode. - border_color_primary: The border color primarily used for items placed directly on the page. - border_color_primary_dark: The border color primarily used for items placed directly on the page in dark mode. - color_accent: The color used for accented items. - color_accent_soft: The softer color used for accented items. - color_accent_soft_dark: The softer color used for accented items in dark mode. - link_text_color: The text color used for links. - link_text_color_dark: The text color used for links in dark mode. - link_text_color_active: The text color used for links when they are active. - link_text_color_active_dark: The text color used for links when they are active in dark mode. - link_text_color_hover: The text color used for links when they are hovered over. - link_text_color_hover_dark: The text color used for links when they are hovered over in dark mode. - link_text_color_visited: The text color used for links when they have been visited. - link_text_color_visited_dark: The text color used for links when they have been visited in dark mode. - prose_text_size: The text size used for markdown and other prose. - prose_text_weight: The text weight used for markdown and other prose. - prose_header_text_weight: The text weight of a header used for markdown and other prose. - shadow_drop: Drop shadow used by other shadowed items. - shadow_drop_lg: Larger drop shadow used by other shadowed items. - shadow_inset: Inset shadow used by other shadowed items. - shadow_spread: Size of shadow spread used by shadowed items. - shadow_spread_dark: Size of shadow spread used by shadowed items in dark mode. - block_background_fill: The background around an item. - block_background_fill_dark: The background around an item in dark mode. - block_border_color: The border color around an item. - block_border_color_dark: The border color around an item in dark mode. - block_border_width: The border width around an item. - block_border_width_dark: The border width around an item in dark mode. - block_info_text_color: The color of the info text. - block_info_text_color_dark: The color of the info text in dark mode. - block_info_text_size: The size of the info text. - block_info_text_weight: The weight of the info text. - block_label_background_fill: The background of the title label of a media element (e.g. image). - block_label_background_fill_dark: The background of the title label of a media element (e.g. image) in dark mode. - block_label_border_color: The border color of the title label of a media element (e.g. image). 
- block_label_border_color_dark: The border color of the title label of a media element (e.g. image) in dark mode. - block_label_border_width: The border width of the title label of a media element (e.g. image). - block_label_border_width_dark: The border width of the title label of a media element (e.g. image) in dark mode. - block_label_shadow: The shadow of the title label of a media element (e.g. image). - block_label_text_color: The text color of the title label of a media element (e.g. image). - block_label_text_color_dark: The text color of the title label of a media element (e.g. image) in dark mode. - block_label_margin: The margin of the title label of a media element (e.g. image) from its surrounding container. - block_label_padding: The padding of the title label of a media element (e.g. image). - block_label_radius: The corner radius of the title label of a media element (e.g. image). - block_label_right_radius: The corner radius of a right-aligned helper label. - block_label_text_size: The text size of the title label of a media element (e.g. image). - block_label_text_weight: The text weight of the title label of a media element (e.g. image). - block_padding: The padding around an item. - block_radius: The corner radius around an item. - block_shadow: The shadow under an item. - block_shadow_dark: The shadow under an item in dark mode. - block_title_background_fill: The background of the title of a form element (e.g. textbox). - block_title_background_fill_dark: The background of the title of a form element (e.g. textbox) in dark mode. - block_title_border_color: The border color of the title of a form element (e.g. textbox). - block_title_border_color_dark: The border color of the title of a form element (e.g. textbox) in dark mode. - block_title_border_width: The border width of the title of a form element (e.g. textbox). - block_title_border_width_dark: The border width of the title of a form element (e.g. textbox) in dark mode. - block_title_text_color: The text color of the title of a form element (e.g. textbox). - block_title_text_color_dark: The text color of the title of a form element (e.g. textbox) in dark mode. - block_title_padding: The padding of the title of a form element (e.g. textbox). - block_title_radius: The corner radius of the title of a form element (e.g. textbox). - block_title_text_size: The text size of the title of a form element (e.g. textbox). - block_title_text_weight: The text weight of the title of a form element (e.g. textbox). - container_radius: The corner radius of a layout component that holds other content. - form_gap_width: The border gap between form elements, (e.g. consecutive textboxes). - layout_gap: The gap between items within a row or column. - panel_background_fill: The background of a panel. - panel_background_fill_dark: The background of a panel in dark mode. - panel_border_color: The border color of a panel. - panel_border_color_dark: The border color of a panel in dark mode. - panel_border_width: The border width of a panel. - panel_border_width_dark: The border width of a panel in dark mode. - section_header_text_size: The text size of a section header (e.g. tab name). - section_header_text_weight: The text weight of a section header (e.g. tab name). - chatbot_code_background_color: The background color of code blocks in the chatbot. - chatbot_code_background_color_dark: The background color of code blocks in the chatbot in dark mode. - checkbox_background_color: The background of a checkbox square or radio circle. 
- checkbox_background_color_dark: The background of a checkbox square or radio circle in dark mode. - checkbox_background_color_focus: The background of a checkbox square or radio circle when focused. - checkbox_background_color_focus_dark: The background of a checkbox square or radio circle when focused in dark mode. - checkbox_background_color_hover: The background of a checkbox square or radio circle when hovered over. - checkbox_background_color_hover_dark: The background of a checkbox square or radio circle when hovered over in dark mode. - checkbox_background_color_selected: The background of a checkbox square or radio circle when selected. - checkbox_background_color_selected_dark: The background of a checkbox square or radio circle when selected in dark mode. - checkbox_border_color: The border color of a checkbox square or radio circle. - checkbox_border_color_dark: The border color of a checkbox square or radio circle in dark mode. - checkbox_border_color_focus: The border color of a checkbox square or radio circle when focused. - checkbox_border_color_focus_dark: The border color of a checkbox square or radio circle when focused in dark mode. - checkbox_border_color_hover: The border color of a checkbox square or radio circle when hovered over. - checkbox_border_color_hover_dark: The border color of a checkbox square or radio circle when hovered over in dark mode. - checkbox_border_color_selected: The border color of a checkbox square or radio circle when selected. - checkbox_border_color_selected_dark: The border color of a checkbox square or radio circle when selected in dark mode. - checkbox_border_radius: The corner radius of a checkbox square. - checkbox_border_width: The border width of a checkbox square or radio circle. - checkbox_border_width_dark: The border width of a checkbox square or radio circle in dark mode. - checkbox_check: The checkmark visual of a checkbox square. - radio_circle: The circle visual of a radio circle. - checkbox_shadow: The shadow of a checkbox square or radio circle. - checkbox_label_background_fill: The background of the surrounding button of a checkbox or radio element. - checkbox_label_background_fill_dark: The background of the surrounding button of a checkbox or radio element in dark mode. - checkbox_label_background_fill_hover: The background of the surrounding button of a checkbox or radio element when hovered over. - checkbox_label_background_fill_hover_dark: The background of the surrounding button of a checkbox or radio element when hovered over in dark mode. - checkbox_label_background_fill_selected: The background of the surrounding button of a checkbox or radio element when selected. - checkbox_label_background_fill_selected_dark: The background of the surrounding button of a checkbox or radio element when selected in dark mode. - checkbox_label_border_color: The border color of the surrounding button of a checkbox or radio element. - checkbox_label_border_color_dark: The border color of the surrounding button of a checkbox or radio element in dark mode. - checkbox_label_border_color_hover: The border color of the surrounding button of a checkbox or radio element when hovered over. - checkbox_label_border_color_hover_dark: The border color of the surrounding button of a checkbox or radio element when hovered over in dark mode. - checkbox_label_border_width: The border width of the surrounding button of a checkbox or radio element. 
- checkbox_label_border_width_dark: The border width of the surrounding button of a checkbox or radio element in dark mode. - checkbox_label_gap: The gap between consecutive checkbox or radio elements. - checkbox_label_padding: The padding of the surrounding button of a checkbox or radio element. - checkbox_label_shadow: The shadow of the surrounding button of a checkbox or radio element. - checkbox_label_text_size: The text size of the label accompanying a checkbox or radio element. - checkbox_label_text_weight: The text weight of the label accompanying a checkbox or radio element. - checkbox_label_text_color: The text color of the label accompanying a checkbox or radio element. - checkbox_label_text_color_dark: The text color of the label accompanying a checkbox or radio element in dark mode. - checkbox_label_text_color_selected: The text color of the label accompanying a checkbox or radio element when selected. - checkbox_label_text_color_selected_dark: The text color of the label accompanying a checkbox or radio element when selected in dark mode. - error_background_fill: The background of an error message. - error_background_fill_dark: The background of an error message in dark mode. - error_border_color: The border color of an error message. - error_border_color_dark: The border color of an error message in dark mode. - error_border_width: The border width of an error message. - error_border_width_dark: The border width of an error message in dark mode. - error_text_color: The text color of an error message. - error_text_color_dark: The text color of an error message in dark mode. - error_icon_color: The icon color of an error message. - error_icon_color_dark: The icon color of an error message in dark mode. - input_background_fill: The background of an input field. - input_background_fill_dark: The background of an input field in dark mode. - input_background_fill_focus: The background of an input field when focused. - input_background_fill_focus_dark: The background of an input field when focused in dark mode. - input_background_fill_hover: The background of an input field when hovered over. - input_background_fill_hover_dark: The background of an input field when hovered over in dark mode. - input_border_color: The border color of an input field. - input_border_color_dark: The border color of an input field in dark mode. - input_border_color_focus: The border color of an input field when focused. - input_border_color_focus_dark: The border color of an input field when focused in dark mode. - input_border_color_hover: The border color of an input field when hovered over. - input_border_color_hover_dark: The border color of an input field when hovered over in dark mode. - input_border_width: The border width of an input field. - input_border_width_dark: The border width of an input field in dark mode. - input_padding: The padding of an input field. - input_placeholder_color: The placeholder text color of an input field. - input_placeholder_color_dark: The placeholder text color of an input field in dark mode. - input_radius: The corner radius of an input field. - input_shadow: The shadow of an input field. - input_shadow_dark: The shadow of an input field in dark mode. - input_shadow_focus: The shadow of an input field when focused. - input_shadow_focus_dark: The shadow of an input field when focused in dark mode. - input_text_size: The text size of an input field. - input_text_weight: The text weight of an input field. - loader_color: The color of the loading animation while a request is pending. - loader_color_dark: The color of the loading animation while a request is pending in dark mode.
- slider_color: The color of the slider in a range element. - slider_color_dark: The color of the slider in a range element in dark mode. - stat_background_fill: The background used for stats visuals (e.g. confidence bars in label). - stat_background_fill_dark: The background used for stats visuals (e.g. confidence bars in label) in dark mode. - table_border_color: The border color of a table. - table_border_color_dark: The border color of a table in dark mode. - table_even_background_fill: The background of even rows in a table. - table_even_background_fill_dark: The background of even rows in a table in dark mode. - table_odd_background_fill: The background of odd rows in a table. - table_odd_background_fill_dark: The background of odd rows in a table in dark mode. - table_radius: The corner radius of a table. - table_row_focus: The background of a focused row in a table. - table_row_focus_dark: The background of a focused row in a table in dark mode. - button_border_width: The border width of a button. - button_border_width_dark: The border width of a button in dark mode. - button_cancel_background_fill: The background of a button of "cancel" variant. - button_cancel_background_fill_dark: The background of a button of "cancel" variant in dark mode. - button_cancel_background_fill_hover: The background of a button of "cancel" variant when hovered over. - button_cancel_background_fill_hover_dark: The background of a button of "cancel" variant when hovered over in dark mode. - button_cancel_border_color: The border color of a button of "cancel" variant. - button_cancel_border_color_dark: The border color of a button of "cancel" variant in dark mode. - button_cancel_border_color_hover: The border color of a button of "cancel" variant when hovered over. - button_cancel_border_color_hover_dark: The border color of a button of "cancel" variant when hovered over in dark mode. - button_cancel_text_color: The text color of a button of "cancel" variant. - button_cancel_text_color_dark: The text color of a button of "cancel" variant in dark mode. - button_cancel_text_color_hover: The text color of a button of "cancel" variant when hovered over. - button_cancel_text_color_hover_dark: The text color of a button of "cancel" variant when hovered over in dark mode. - button_large_padding: The padding of a button with the default "large" size. - button_large_radius: The corner radius of a button with the default "large" size. - button_large_text_size: The text size of a button with the default "large" size. - button_large_text_weight: The text weight of a button with the default "large" size. - button_primary_background_fill: The background of a button of "primary" variant. - button_primary_background_fill_dark: The background of a button of "primary" variant in dark mode. - button_primary_background_fill_hover: The background of a button of "primary" variant when hovered over. - button_primary_background_fill_hover_dark: The background of a button of "primary" variant when hovered over in dark mode. - button_primary_border_color: The border color of a button of "primary" variant. - button_primary_border_color_dark: The border color of a button of "primary" variant in dark mode. - button_primary_border_color_hover: The border color of a button of "primary" variant when hovered over. - button_primary_border_color_hover_dark: The border color of a button of "primary" variant when hovered over in dark mode. - button_primary_text_color: The text color of a button of "primary" variant. 
- button_primary_text_color_dark: The text color of a button of "primary" variant in dark mode. - button_primary_text_color_hover: The text color of a button of "primary" variant when hovered over. - button_primary_text_color_hover_dark: The text color of a button of "primary" variant when hovered over in dark mode. - button_secondary_background_fill: The background of a button of default "secondary" variant. - button_secondary_background_fill_dark: The background of a button of default "secondary" variant in dark mode. - button_secondary_background_fill_hover: The background of a button of default "secondary" variant when hovered over. - button_secondary_background_fill_hover_dark: The background of a button of default "secondary" variant when hovered over in dark mode. - button_secondary_border_color: The border color of a button of default "secondary" variant. - button_secondary_border_color_dark: The border color of a button of default "secondary" variant in dark mode. - button_secondary_border_color_hover: The border color of a button of default "secondary" variant when hovered over. - button_secondary_border_color_hover_dark: The border color of a button of default "secondary" variant when hovered over in dark mode. - button_secondary_text_color: The text color of a button of default "secondary" variant. - button_secondary_text_color_dark: The text color of a button of default "secondary" variant in dark mode. - button_secondary_text_color_hover: The text color of a button of default "secondary" variant when hovered over. - button_secondary_text_color_hover_dark: The text color of a button of default "secondary" variant when hovered over in dark mode. - button_shadow: The shadow under a button. - button_shadow_active: The shadow under a button when pressed. - button_shadow_hover: The shadow under a button when hovered over. - button_small_padding: The padding of a button set to "small" size. - button_small_radius: The corner radius of a button set to "small" size. - button_small_text_size: The text size of a button set to "small" size. - button_small_text_weight: The text weight of a button set to "small" size. - button_transition: The transition animation duration of a button between regular, hover, and focused states. 
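        Example (editor's illustration, not part of the original docstring; it assumes the public gr.themes API exposed by this module):

            import gradio as gr
            theme = gr.themes.Base(primary_hue="emerald", radius_size="sm").set(
                button_primary_background_fill="*primary_600",
                button_primary_background_fill_hover="*primary_500",
                button_primary_text_color="white",
            )
            with gr.Blocks(theme=theme) as demo:
                gr.Button("Styled button", variant="primary")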
- """ - - # Body - self.body_background_fill = body_background_fill or getattr( - self, "body_background_fill", "*background_fill_primary" - ) - self.body_background_fill_dark = body_background_fill_dark or getattr( - self, "body_background_fill_dark", "*background_fill_primary" - ) - self.body_text_color = body_text_color or getattr( - self, "body_text_color", "*neutral_800" - ) - self.body_text_color_dark = body_text_color_dark or getattr( - self, "body_text_color_dark", "*neutral_100" - ) - self.body_text_size = body_text_size or getattr( - self, "body_text_size", "*text_md" - ) - self.body_text_weight = body_text_weight or getattr( - self, "body_text_weight", "400" - ) - self.embed_radius = embed_radius or getattr(self, "embed_radius", "*radius_lg") - # Core Colors - self.color_accent = color_accent or getattr( - self, "color_accent", "*primary_500" - ) - self.color_accent_soft = color_accent_soft or getattr( - self, "color_accent_soft", "*primary_50" - ) - self.color_accent_soft_dark = color_accent_soft_dark or getattr( - self, "color_accent_soft_dark", "*neutral_700" - ) - self.background_fill_primary = background_fill_primary or getattr( - self, "background_primary", "white" - ) - self.background_fill_primary_dark = background_fill_primary_dark or getattr( - self, "background_primary_dark", "*neutral_950" - ) - self.background_fill_secondary = background_fill_secondary or getattr( - self, "background_secondary", "*neutral_50" - ) - self.background_fill_secondary_dark = background_fill_secondary_dark or getattr( - self, "background_secondary_dark", "*neutral_900" - ) - self.border_color_accent = border_color_accent or getattr( - self, "border_color_accent", "*primary_300" - ) - self.border_color_accent_dark = border_color_accent_dark or getattr( - self, "border_color_accent_dark", "*neutral_600" - ) - self.border_color_primary = border_color_primary or getattr( - self, "border_color_primary", "*neutral_200" - ) - self.border_color_primary_dark = border_color_primary_dark or getattr( - self, "border_color_primary_dark", "*neutral_700" - ) - # Text Colors - self.link_text_color = link_text_color or getattr( - self, "link_text_color", "*secondary_600" - ) - self.link_text_color_active = link_text_color_active or getattr( - self, "link_text_color_active", "*secondary_600" - ) - self.link_text_color_active_dark = link_text_color_active_dark or getattr( - self, "link_text_color_active_dark", "*secondary_500" - ) - self.link_text_color_dark = link_text_color_dark or getattr( - self, "link_text_color_dark", "*secondary_500" - ) - self.link_text_color_hover = link_text_color_hover or getattr( - self, "link_text_color_hover", "*secondary_700" - ) - self.link_text_color_hover_dark = link_text_color_hover_dark or getattr( - self, "link_text_color_hover_dark", "*secondary_400" - ) - self.link_text_color_visited = link_text_color_visited or getattr( - self, "link_text_color_visited", "*secondary_500" - ) - self.link_text_color_visited_dark = link_text_color_visited_dark or getattr( - self, "link_text_color_visited_dark", "*secondary_600" - ) - self.body_text_color_subdued = body_text_color_subdued or getattr( - self, "body_text_color_subdued", "*neutral_400" - ) - self.body_text_color_subdued_dark = body_text_color_subdued_dark or getattr( - self, "body_text_color_subdued_dark", "*neutral_400" - ) - # Shadows - self.shadow_drop = shadow_drop or getattr( - self, "shadow_drop", "rgba(0,0,0,0.05) 0px 1px 2px 0px" - ) - self.shadow_drop_lg = shadow_drop_lg or getattr( - self, - "shadow_drop_lg", - "0 
1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)", - ) - self.shadow_inset = shadow_inset or getattr( - self, "shadow_inset", "rgba(0,0,0,0.05) 0px 2px 4px 0px inset" - ) - self.shadow_spread = shadow_spread or getattr(self, "shadow_spread", "3px") - self.shadow_spread_dark = shadow_spread_dark or getattr( - self, "shadow_spread_dark", "1px" - ) - # Layout Atoms - self.block_background_fill = block_background_fill or getattr( - self, "block_background_fill", "*background_fill_primary" - ) - self.block_background_fill_dark = block_background_fill_dark or getattr( - self, "block_background_fill_dark", "*neutral_800" - ) - self.block_border_color = block_border_color or getattr( - self, "block_border_color", "*border_color_primary" - ) - self.block_border_color_dark = block_border_color_dark or getattr( - self, "block_border_color_dark", "*border_color_primary" - ) - self.block_border_width = block_border_width or getattr( - self, "block_border_width", "1px" - ) - self.block_border_width_dark = block_border_width_dark or getattr( - self, "block_border_width_dark", None - ) - self.block_info_text_color = block_info_text_color or getattr( - self, "block_info_text_color", "*body_text_color_subdued" - ) - self.block_info_text_color_dark = block_info_text_color_dark or getattr( - self, "block_info_text_color_dark", "*body_text_color_subdued" - ) - self.block_info_text_size = block_info_text_size or getattr( - self, "block_info_text_size", "*text_sm" - ) - self.block_info_text_weight = block_info_text_weight or getattr( - self, "block_info_text_weight", "400" - ) - self.block_label_background_fill = block_label_background_fill or getattr( - self, "block_label_background_fill", "*background_fill_primary" - ) - self.block_label_background_fill_dark = ( - block_label_background_fill_dark - or getattr( - self, "block_label_background_fill_dark", "*background_fill_secondary" - ) - ) - self.block_label_border_color = block_label_border_color or getattr( - self, "block_label_border_color", "*border_color_primary" - ) - self.block_label_border_color_dark = block_label_border_color_dark or getattr( - self, "block_label_border_color_dark", "*border_color_primary" - ) - self.block_label_border_width = block_label_border_width or getattr( - self, "block_label_border_width", "1px" - ) - self.block_label_border_width_dark = block_label_border_width_dark or getattr( - self, "block_label_border_width_dark", None - ) - self.block_label_shadow = block_label_shadow or getattr( - self, "block_label_shadow", "*block_shadow" - ) - self.block_label_text_color = block_label_text_color or getattr( - self, "block_label_text_color", "*neutral_500" - ) - self.block_label_text_color_dark = block_label_text_color_dark or getattr( - self, "block_label_text_color_dark", "*neutral_200" - ) - self.block_label_margin = block_label_margin or getattr( - self, "block_label_margin", "0" - ) - self.block_label_padding = block_label_padding or getattr( - self, "block_label_padding", "*spacing_sm *spacing_lg" - ) - self.block_label_radius = block_label_radius or getattr( - self, - "block_label_radius", - "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0", - ) - self.block_label_right_radius = block_label_right_radius or getattr( - self, - "block_label_right_radius", - "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)", - ) - self.block_label_text_size = block_label_text_size or getattr( - self, "block_label_text_size", "*text_sm" - ) - self.block_label_text_weight = block_label_text_weight or getattr( - self, 
"block_label_text_weight", "400" - ) - self.block_padding = block_padding or getattr( - self, "block_padding", "*spacing_xl calc(*spacing_xl + 2px)" - ) - self.block_radius = block_radius or getattr(self, "block_radius", "*radius_lg") - self.block_shadow = block_shadow or getattr(self, "block_shadow", "none") - self.block_shadow_dark = block_shadow_dark or getattr( - self, "block_shadow_dark", None - ) - self.block_title_background_fill = block_title_background_fill or getattr( - self, "block_title_background_fill", "none" - ) - self.block_title_background_fill_dark = ( - block_title_background_fill_dark - or getattr(self, "block_title_background_fill_dark", None) - ) - self.block_title_border_color = block_title_border_color or getattr( - self, "block_title_border_color", "none" - ) - self.block_title_border_color_dark = block_title_border_color_dark or getattr( - self, "block_title_border_color_dark", None - ) - self.block_title_border_width = block_title_border_width or getattr( - self, "block_title_border_width", "0px" - ) - self.block_title_border_width_dark = block_title_border_width_dark or getattr( - self, "block_title_border_width_dark", None - ) - self.block_title_text_color = block_title_text_color or getattr( - self, "block_title_text_color", "*neutral_500" - ) - self.block_title_text_color_dark = block_title_text_color_dark or getattr( - self, "block_title_text_color_dark", "*neutral_200" - ) - self.block_title_padding = block_title_padding or getattr( - self, "block_title_padding", "0" - ) - self.block_title_radius = block_title_radius or getattr( - self, "block_title_radius", "none" - ) - self.block_title_text_size = block_title_text_size or getattr( - self, "block_title_text_size", "*text_md" - ) - self.block_title_text_weight = block_title_text_weight or getattr( - self, "block_title_text_weight", "400" - ) - self.container_radius = container_radius or getattr( - self, "container_radius", "*radius_lg" - ) - self.form_gap_width = form_gap_width or getattr(self, "form_gap_width", "0px") - self.layout_gap = layout_gap or getattr(self, "layout_gap", "*spacing_xxl") - self.panel_background_fill = panel_background_fill or getattr( - self, "panel_background_fill", "*background_fill_secondary" - ) - self.panel_background_fill_dark = panel_background_fill_dark or getattr( - self, "panel_background_fill_dark", "*background_fill_secondary" - ) - self.panel_border_color = panel_border_color or getattr( - self, "panel_border_color", "*border_color_primary" - ) - self.panel_border_color_dark = panel_border_color_dark or getattr( - self, "panel_border_color_dark", "*border_color_primary" - ) - self.panel_border_width = panel_border_width or getattr( - self, "panel_border_width", "0" - ) - self.panel_border_width_dark = panel_border_width_dark or getattr( - self, "panel_border_width_dark", None - ) - self.section_header_text_size = section_header_text_size or getattr( - self, "section_header_text_size", "*text_md" - ) - self.section_header_text_weight = section_header_text_weight or getattr( - self, "section_header_text_weight", "400" - ) - # Component Atoms - self.chatbot_code_background_color = chatbot_code_background_color or getattr( - self, "chatbot_code_background_color", "*neutral_100" - ) - self.chatbot_code_background_color_dark = ( - chatbot_code_background_color_dark - or getattr(self, "chatbot_code_background_color_dark", "*neutral_800") - ) - self.checkbox_background_color = checkbox_background_color or getattr( - self, "checkbox_background_color", "*background_fill_primary" 
- ) - self.checkbox_background_color_dark = checkbox_background_color_dark or getattr( - self, "checkbox_background_color_dark", "*neutral_800" - ) - self.checkbox_background_color_focus = ( - checkbox_background_color_focus - or getattr( - self, "checkbox_background_color_focus", "*checkbox_background_color" - ) - ) - self.checkbox_background_color_focus_dark = ( - checkbox_background_color_focus_dark - or getattr( - self, - "checkbox_background_color_focus_dark", - "*checkbox_background_color", - ) - ) - self.checkbox_background_color_hover = ( - checkbox_background_color_hover - or getattr( - self, "checkbox_background_color_hover", "*checkbox_background_color" - ) - ) - self.checkbox_background_color_hover_dark = ( - checkbox_background_color_hover_dark - or getattr( - self, - "checkbox_background_color_hover_dark", - "*checkbox_background_color", - ) - ) - self.checkbox_background_color_selected = ( - checkbox_background_color_selected - or getattr(self, "checkbox_background_color_selected", "*secondary_600") - ) - self.checkbox_background_color_selected_dark = ( - checkbox_background_color_selected_dark - or getattr( - self, "checkbox_background_color_selected_dark", "*secondary_600" - ) - ) - self.checkbox_border_color = checkbox_border_color or getattr( - self, "checkbox_border_color", "*neutral_300" - ) - self.checkbox_border_color_dark = checkbox_border_color_dark or getattr( - self, "checkbox_border_color_dark", "*neutral_700" - ) - self.checkbox_border_color_focus = checkbox_border_color_focus or getattr( - self, "checkbox_border_color_focus", "*secondary_500" - ) - self.checkbox_border_color_focus_dark = ( - checkbox_border_color_focus_dark - or getattr(self, "checkbox_border_color_focus_dark", "*secondary_500") - ) - self.checkbox_border_color_hover = checkbox_border_color_hover or getattr( - self, "checkbox_border_color_hover", "*neutral_300" - ) - self.checkbox_border_color_hover_dark = ( - checkbox_border_color_hover_dark - or getattr(self, "checkbox_border_color_hover_dark", "*neutral_600") - ) - self.checkbox_border_color_selected = checkbox_border_color_selected or getattr( - self, "checkbox_border_color_selected", "*secondary_600" - ) - self.checkbox_border_color_selected_dark = ( - checkbox_border_color_selected_dark - or getattr(self, "checkbox_border_color_selected_dark", "*secondary_600") - ) - self.checkbox_border_radius = checkbox_border_radius or getattr( - self, "checkbox_border_radius", "*radius_sm" - ) - self.checkbox_border_width = checkbox_border_width or getattr( - self, "checkbox_border_width", "*input_border_width" - ) - self.checkbox_border_width_dark = checkbox_border_width_dark or getattr( - self, "checkbox_border_width_dark", "*input_border_width" - ) - self.checkbox_label_background_fill = checkbox_label_background_fill or getattr( - self, "checkbox_label_background_fill", "*button_secondary_background_fill" - ) - self.checkbox_label_background_fill_dark = ( - checkbox_label_background_fill_dark - or getattr( - self, - "checkbox_label_background_fill_dark", - "*button_secondary_background_fill", - ) - ) - self.checkbox_label_background_fill_hover = ( - checkbox_label_background_fill_hover - or getattr( - self, - "checkbox_label_background_fill_hover", - "*button_secondary_background_fill_hover", - ) - ) - self.checkbox_label_background_fill_hover_dark = ( - checkbox_label_background_fill_hover_dark - or getattr( - self, - "checkbox_label_background_fill_hover_dark", - "*button_secondary_background_fill_hover", - ) - ) - 
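        # [Editor's note: illustrative comment, not in the original file.] Every
        # assignment in this method follows the same fallback chain: the explicit
        # keyword argument wins; otherwise any value already present on the instance
        # is kept (so repeated .set() calls preserve earlier overrides); otherwise the
        # hard-coded default applies. Strings beginning with "*" reference other theme
        # attributes (e.g. "*button_secondary_background_fill") and are resolved when
        # the theme's CSS variables are generated. With hypothetical names, the pattern is:
        #
        #     self.some_setting = some_setting or getattr(self, "some_setting", "*fallback_attr")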
self.checkbox_label_background_fill_selected = ( - checkbox_label_background_fill_selected - or getattr( - self, - "checkbox_label_background_fill_selected", - "*checkbox_label_background_fill", - ) - ) - self.checkbox_label_background_fill_selected_dark = ( - checkbox_label_background_fill_selected_dark - or getattr( - self, - "checkbox_label_background_fill_selected_dark", - "*checkbox_label_background_fill", - ) - ) - self.checkbox_label_border_color = checkbox_label_border_color or getattr( - self, "checkbox_label_border_color", "*border_color_primary" - ) - self.checkbox_label_border_color_dark = ( - checkbox_label_border_color_dark - or getattr( - self, "checkbox_label_border_color_dark", "*border_color_primary" - ) - ) - self.checkbox_label_border_color_hover = ( - checkbox_label_border_color_hover - or getattr( - self, - "checkbox_label_border_color_hover", - "*checkbox_label_border_color", - ) - ) - self.checkbox_label_border_color_hover_dark = ( - checkbox_label_border_color_hover_dark - or getattr( - self, - "checkbox_label_border_color_hover_dark", - "*checkbox_label_border_color", - ) - ) - self.checkbox_label_border_width = checkbox_label_border_width or getattr( - self, "checkbox_label_border_width", "*input_border_width" - ) - self.checkbox_label_border_width_dark = ( - checkbox_label_border_width_dark - or getattr(self, "checkbox_label_border_width_dark", "*input_border_width") - ) - self.checkbox_label_gap = checkbox_label_gap or getattr( - self, "checkbox_label_gap", "*spacing_lg" - ) - self.checkbox_label_padding = checkbox_label_padding or getattr( - self, "checkbox_label_padding", "*spacing_md calc(2 * *spacing_md)" - ) - self.checkbox_label_shadow = checkbox_label_shadow or getattr( - self, "checkbox_label_shadow", "none" - ) - self.checkbox_label_text_size = checkbox_label_text_size or getattr( - self, "checkbox_label_text_size", "*text_md" - ) - self.checkbox_label_text_weight = checkbox_label_text_weight or getattr( - self, "checkbox_label_text_weight", "400" - ) - self.checkbox_check = checkbox_check or getattr( - self, - "checkbox_check", - """url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e")""", - ) - self.radio_circle = radio_circle or getattr( - self, - "radio_circle", - """url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e")""", - ) - self.checkbox_shadow = checkbox_shadow or getattr( - self, "checkbox_shadow", "*input_shadow" - ) - self.checkbox_label_text_color = checkbox_label_text_color or getattr( - self, "checkbox_label_text_color", "*body_text_color" - ) - self.checkbox_label_text_color_dark = checkbox_label_text_color_dark or getattr( - self, "checkbox_label_text_color_dark", "*body_text_color" - ) - self.checkbox_label_text_color_selected = ( - checkbox_label_text_color_selected - or getattr( - self, "checkbox_label_text_color_selected", "*checkbox_label_text_color" - ) - ) - self.checkbox_label_text_color_selected_dark = ( - checkbox_label_text_color_selected_dark - or getattr( - self, - "checkbox_label_text_color_selected_dark", - "*checkbox_label_text_color", - ) - ) - self.error_background_fill = error_background_fill or getattr( - self, "error_background_fill", colors.red.c50 - ) - self.error_background_fill_dark = error_background_fill_dark or getattr( - self, 
"error_background_fill_dark", "*background_fill_primary" - ) - self.error_border_color = error_border_color or getattr( - self, "error_border_color", colors.red.c700 - ) - self.error_border_color_dark = error_border_color_dark or getattr( - self, "error_border_color_dark", colors.red.c500 - ) - self.error_border_width = error_border_width or getattr( - self, "error_border_width", "1px" - ) - self.error_border_width_dark = error_border_width_dark or getattr( - self, "error_border_width_dark", None - ) - self.error_text_color = error_text_color or getattr( - self, "error_text_color", colors.red.c700 - ) - self.error_text_color_dark = error_text_color_dark or getattr( - self, "error_text_color_dark", colors.red.c50 - ) - self.error_icon_color = error_icon_color or getattr( - self, "error_icon_color", colors.red.c700 - ) - self.error_icon_color_dark = error_icon_color_dark or getattr( - self, "error_icon_color_dark", colors.red.c500 - ) - self.input_background_fill = input_background_fill or getattr( - self, "input_background_fill", "*neutral_100" - ) - self.input_background_fill_dark = input_background_fill_dark or getattr( - self, "input_background_fill_dark", "*neutral_700" - ) - self.input_background_fill_focus = input_background_fill_focus or getattr( - self, "input_background_fill_focus", "*secondary_500" - ) - self.input_background_fill_focus_dark = ( - input_background_fill_focus_dark - or getattr(self, "input_background_fill_focus_dark", "*secondary_600") - ) - self.input_background_fill_hover = input_background_fill_hover or getattr( - self, "input_background_fill_hover", "*input_background_fill" - ) - self.input_background_fill_hover_dark = ( - input_background_fill_hover_dark - or getattr( - self, "input_background_fill_hover_dark", "*input_background_fill" - ) - ) - self.input_border_color = input_border_color or getattr( - self, "input_border_color", "*border_color_primary" - ) - self.input_border_color_dark = input_border_color_dark or getattr( - self, "input_border_color_dark", "*border_color_primary" - ) - self.input_border_color_focus = input_border_color_focus or getattr( - self, "input_border_color_focus", "*secondary_300" - ) - self.input_border_color_focus_dark = input_border_color_focus_dark or getattr( - self, "input_border_color_focus_dark", "*neutral_700" - ) - self.input_border_color_hover = input_border_color_hover or getattr( - self, "input_border_color_hover", "*input_border_color" - ) - self.input_border_color_hover_dark = input_border_color_hover_dark or getattr( - self, "input_border_color_hover_dark", "*input_border_color" - ) - self.input_border_width = input_border_width or getattr( - self, "input_border_width", "0px" - ) - self.input_border_width_dark = input_border_width_dark or getattr( - self, "input_border_width_dark", None - ) - self.input_padding = input_padding or getattr( - self, "input_padding", "*spacing_xl" - ) - self.input_placeholder_color = input_placeholder_color or getattr( - self, "input_placeholder_color", "*neutral_400" - ) - self.input_placeholder_color_dark = input_placeholder_color_dark or getattr( - self, "input_placeholder_color_dark", "*neutral_500" - ) - self.input_radius = input_radius or getattr(self, "input_radius", "*radius_lg") - self.input_shadow = input_shadow or getattr(self, "input_shadow", "none") - self.input_shadow_dark = input_shadow_dark or getattr( - self, "input_shadow_dark", None - ) - self.input_shadow_focus = input_shadow_focus or getattr( - self, "input_shadow_focus", "*input_shadow" - ) - 
self.input_shadow_focus_dark = input_shadow_focus_dark or getattr( - self, "input_shadow_focus_dark", None - ) - self.input_text_size = input_text_size or getattr( - self, "input_text_size", "*text_md" - ) - self.input_text_weight = input_text_weight or getattr( - self, "input_text_weight", "400" - ) - self.loader_color = loader_color or getattr( - self, "loader_color", "*color_accent" - ) - self.loader_color_dark = loader_color_dark or getattr( - self, "loader_color_dark", None - ) - self.prose_text_size = prose_text_size or getattr( - self, "prose_text_size", "*text_md" - ) - self.prose_text_weight = prose_text_weight or getattr( - self, "prose_text_weight", "400" - ) - self.prose_header_text_weight = prose_header_text_weight or getattr( - self, "prose_header_text_weight", "600" - ) - self.slider_color = slider_color or getattr(self, "slider_color", "auto") - self.slider_color_dark = slider_color_dark or getattr( - self, "slider_color_dark", None - ) - self.stat_background_fill = stat_background_fill or getattr( - self, "stat_background_fill", "*primary_300" - ) - self.stat_background_fill_dark = stat_background_fill_dark or getattr( - self, "stat_background_fill_dark", "*primary_500" - ) - self.table_border_color = table_border_color or getattr( - self, "table_border_color", "*neutral_300" - ) - self.table_border_color_dark = table_border_color_dark or getattr( - self, "table_border_color_dark", "*neutral_700" - ) - self.table_even_background_fill = table_even_background_fill or getattr( - self, "table_even_background_fill", "white" - ) - self.table_even_background_fill_dark = ( - table_even_background_fill_dark - or getattr(self, "table_even_background_fill_dark", "*neutral_950") - ) - self.table_odd_background_fill = table_odd_background_fill or getattr( - self, "table_odd_background_fill", "*neutral_50" - ) - self.table_odd_background_fill_dark = table_odd_background_fill_dark or getattr( - self, "table_odd_background_fill_dark", "*neutral_900" - ) - self.table_radius = table_radius or getattr(self, "table_radius", "*radius_lg") - self.table_row_focus = table_row_focus or getattr( - self, "table_row_focus", "*color_accent_soft" - ) - self.table_row_focus_dark = table_row_focus_dark or getattr( - self, "table_row_focus_dark", "*color_accent_soft" - ) - # Buttons - self.button_border_width = button_border_width or getattr( - self, "button_border_width", "*input_border_width" - ) - self.button_border_width_dark = button_border_width_dark or getattr( - self, "button_border_width_dark", "*input_border_width" - ) - self.button_cancel_background_fill = button_cancel_background_fill or getattr( - self, "button_cancel_background_fill", "*button_secondary_background_fill" - ) - self.button_cancel_background_fill_dark = ( - button_cancel_background_fill_dark - or getattr( - self, - "button_cancel_background_fill_dark", - "*button_secondary_background_fill", - ) - ) - self.button_cancel_background_fill_hover = ( - button_cancel_background_fill_hover - or getattr( - self, - "button_cancel_background_fill_hover", - "*button_cancel_background_fill", - ) - ) - self.button_cancel_background_fill_hover_dark = ( - button_cancel_background_fill_hover_dark - or getattr( - self, - "button_cancel_background_fill_hover_dark", - "*button_cancel_background_fill", - ) - ) - self.button_cancel_border_color = button_cancel_border_color or getattr( - self, "button_cancel_border_color", "*button_secondary_border_color" - ) - self.button_cancel_border_color_dark = ( - button_cancel_border_color_dark - or getattr( - 
self, - "button_cancel_border_color_dark", - "*button_secondary_border_color", - ) - ) - self.button_cancel_border_color_hover = ( - button_cancel_border_color_hover - or getattr( - self, - "button_cancel_border_color_hover", - "*button_cancel_border_color", - ) - ) - self.button_cancel_border_color_hover_dark = ( - button_cancel_border_color_hover_dark - or getattr( - self, - "button_cancel_border_color_hover_dark", - "*button_cancel_border_color", - ) - ) - self.button_cancel_text_color = button_cancel_text_color or getattr( - self, "button_cancel_text_color", "*button_secondary_text_color" - ) - self.button_cancel_text_color_dark = button_cancel_text_color_dark or getattr( - self, "button_cancel_text_color_dark", "*button_secondary_text_color" - ) - self.button_cancel_text_color_hover = button_cancel_text_color_hover or getattr( - self, "button_cancel_text_color_hover", "*button_cancel_text_color" - ) - self.button_cancel_text_color_hover_dark = ( - button_cancel_text_color_hover_dark - or getattr( - self, "button_cancel_text_color_hover_dark", "*button_cancel_text_color" - ) - ) - self.button_large_padding = button_large_padding or getattr( - self, "button_large_padding", "*spacing_lg calc(2 * *spacing_lg)" - ) - self.button_large_radius = button_large_radius or getattr( - self, "button_large_radius", "*radius_lg" - ) - self.button_large_text_size = button_large_text_size or getattr( - self, "button_large_text_size", "*text_lg" - ) - self.button_large_text_weight = button_large_text_weight or getattr( - self, "button_large_text_weight", "600" - ) - self.button_primary_background_fill = button_primary_background_fill or getattr( - self, "button_primary_background_fill", "*primary_200" - ) - self.button_primary_background_fill_dark = ( - button_primary_background_fill_dark - or getattr(self, "button_primary_background_fill_dark", "*primary_700") - ) - self.button_primary_background_fill_hover = ( - button_primary_background_fill_hover - or getattr( - self, - "button_primary_background_fill_hover", - "*button_primary_background_fill", - ) - ) - self.button_primary_background_fill_hover_dark = ( - button_primary_background_fill_hover_dark - or getattr( - self, - "button_primary_background_fill_hover_dark", - "*button_primary_background_fill", - ) - ) - self.button_primary_border_color = button_primary_border_color or getattr( - self, "button_primary_border_color", "*primary_200" - ) - self.button_primary_border_color_dark = ( - button_primary_border_color_dark - or getattr(self, "button_primary_border_color_dark", "*primary_600") - ) - self.button_primary_border_color_hover = ( - button_primary_border_color_hover - or getattr( - self, - "button_primary_border_color_hover", - "*button_primary_border_color", - ) - ) - self.button_primary_border_color_hover_dark = ( - button_primary_border_color_hover_dark - or getattr( - self, - "button_primary_border_color_hover_dark", - "*button_primary_border_color", - ) - ) - self.button_primary_text_color = button_primary_text_color or getattr( - self, "button_primary_text_color", "*primary_600" - ) - self.button_primary_text_color_dark = button_primary_text_color_dark or getattr( - self, "button_primary_text_color_dark", "white" - ) - self.button_primary_text_color_hover = ( - button_primary_text_color_hover - or getattr( - self, "button_primary_text_color_hover", "*button_primary_text_color" - ) - ) - self.button_primary_text_color_hover_dark = ( - button_primary_text_color_hover_dark - or getattr( - self, - "button_primary_text_color_hover_dark", - 
"*button_primary_text_color", - ) - ) - self.button_secondary_background_fill = ( - button_secondary_background_fill - or getattr(self, "button_secondary_background_fill", "*neutral_200") - ) - self.button_secondary_background_fill_dark = ( - button_secondary_background_fill_dark - or getattr(self, "button_secondary_background_fill_dark", "*neutral_600") - ) - self.button_secondary_background_fill_hover = ( - button_secondary_background_fill_hover - or getattr( - self, - "button_secondary_background_fill_hover", - "*button_secondary_background_fill", - ) - ) - self.button_secondary_background_fill_hover_dark = ( - button_secondary_background_fill_hover_dark - or getattr( - self, - "button_secondary_background_fill_hover_dark", - "*button_secondary_background_fill", - ) - ) - self.button_secondary_border_color = button_secondary_border_color or getattr( - self, "button_secondary_border_color", "*neutral_200" - ) - self.button_secondary_border_color_dark = ( - button_secondary_border_color_dark - or getattr(self, "button_secondary_border_color_dark", "*neutral_600") - ) - self.button_secondary_border_color_hover = ( - button_secondary_border_color_hover - or getattr( - self, - "button_secondary_border_color_hover", - "*button_secondary_border_color", - ) - ) - self.button_secondary_border_color_hover_dark = ( - button_secondary_border_color_hover_dark - or getattr( - self, - "button_secondary_border_color_hover_dark", - "*button_secondary_border_color", - ) - ) - self.button_secondary_text_color = button_secondary_text_color or getattr( - self, "button_secondary_text_color", "*neutral_700" - ) - self.button_secondary_text_color_dark = ( - button_secondary_text_color_dark - or getattr(self, "button_secondary_text_color_dark", "white") - ) - self.button_secondary_text_color_hover = ( - button_secondary_text_color_hover - or getattr( - self, - "button_secondary_text_color_hover", - "*button_secondary_text_color", - ) - ) - self.button_secondary_text_color_hover_dark = ( - button_secondary_text_color_hover_dark - or getattr( - self, - "button_secondary_text_color_hover_dark", - "*button_secondary_text_color", - ) - ) - self.button_shadow = button_shadow or getattr(self, "button_shadow", "none") - self.button_shadow_active = button_shadow_active or getattr( - self, "button_shadow_active", "none" - ) - self.button_shadow_hover = button_shadow_hover or getattr( - self, "button_shadow_hover", "none" - ) - self.button_small_padding = button_small_padding or getattr( - self, "button_small_padding", "*spacing_sm calc(2 * *spacing_sm)" - ) - self.button_small_radius = button_small_radius or getattr( - self, "button_small_radius", "*radius_lg" - ) - self.button_small_text_size = button_small_text_size or getattr( - self, "button_small_text_size", "*text_md" - ) - self.button_small_text_weight = button_small_text_weight or getattr( - self, "button_small_text_weight", "400" - ) - self.button_transition = button_transition or getattr( - self, "button_transition", "background-color 0.2s ease" - ) - return self diff --git a/spaces/DeepFloyd/IF/style.css b/spaces/DeepFloyd/IF/style.css deleted file mode 100644 index 17fb109b7cef35a21bc44dcefba0ea3c2913f0ee..0000000000000000000000000000000000000000 --- a/spaces/DeepFloyd/IF/style.css +++ /dev/null @@ -1,238 +0,0 @@ -/* -This CSS file is modified from: -https://huggingface.co/spaces/stabilityai/stable-diffusion/blob/2794a3c3ba66115c307075098e713f572b08bf80/app.py -*/ - -h1 { - text-align: center; -} - -.gradio-container { - font-family: 'IBM Plex Sans', 
sans-serif; -} - -.gr-button { - color: white; - border-color: black; - background: black; -} - -input[type='range'] { - accent-color: black; -} - -.dark input[type='range'] { - accent-color: #dfdfdf; -} - -.container { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; -} - -#gallery { - min-height: auto; - height: 185px; - margin-top: 15px; - margin-left: auto; - margin-right: auto; - border-bottom-right-radius: .5rem !important; - border-bottom-left-radius: .5rem !important; -} -#gallery .grid-wrap, #gallery .empty{ - height: 185px; - min-height: 185px; -} -#gallery .preview{ - height: 185px; - min-height: 185px!important; -} -#gallery>div>.h-full { - min-height: 20rem; -} - -.details:hover { - text-decoration: underline; -} - -.gr-button { - white-space: nowrap; -} - -.gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; -} - -#advanced-btn { - font-size: .7rem !important; - line-height: 19px; - margin-top: 12px; - margin-bottom: 12px; - padding: 2px 8px; - border-radius: 14px !important; -} - -#advanced-options { - display: none; - margin-bottom: 20px; -} - -.footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; -} - -.footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; -} - -.dark .footer { - border-color: #303030; -} - -.dark .footer>p { - background: #0b0f19; -} - -.acknowledgments h4 { - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; -} - -.animate-spin { - animation: spin 1s linear infinite; -} - -@keyframes spin { - from { - transform: rotate(0deg); - } - - to { - transform: rotate(360deg); - } -} - -#share-btn-container { - display: flex; - padding-left: 0.5rem !important; - padding-right: 0.5rem !important; - background-color: #000000; - justify-content: center; - align-items: center; - border-radius: 9999px !important; - width: 13rem; - margin-top: 10px; - margin-left: auto; -} - -#share-btn { - all: initial; - color: #ffffff; - font-weight: 600; - cursor: pointer; - font-family: 'IBM Plex Sans', sans-serif; - margin-left: 0.5rem !important; - padding-top: 0.25rem !important; - padding-bottom: 0.25rem !important; - right: 0; -} - -#share-btn * { - all: unset; -} - -#share-btn-container div:nth-child(-n+2) { - width: auto !important; - min-height: 0px !important; -} - -#share-btn-container .wrap { - display: none !important; -} - -.gr-form { - flex: 1 1 50%; - border-top-right-radius: 0; - border-bottom-right-radius: 0; -} - -#prompt-container { - gap: 0; -} - -#prompt-text-input, -#negative-prompt-text-input { - padding: .45rem 0.625rem -} - -#component-16 { - border-top-width: 1px !important; - margin-top: 1em -} - -.image_duplication { - position: absolute; - width: 100px; - left: 50px -} - -#component-0 { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; -} - -#upscaled-image img { - object-fit: scale-down; -} -/* share button */ -#share-btn-container { - display: flex; - padding-left: 0.5rem !important; - padding-right: 0.5rem !important; - 
background-color: #000000; - justify-content: center; - align-items: center; - border-radius: 9999px !important; - width: 13rem; - margin-top: 10px; - margin-left: auto; - flex: unset !important; -} -#share-btn { - all: initial; - color: #ffffff; - font-weight: 600; - cursor: pointer; - font-family: 'IBM Plex Sans', sans-serif; - margin-left: 0.5rem !important; - padding-top: 0.25rem !important; - padding-bottom: 0.25rem !important; - right:0; -} -#share-btn * { - all: unset !important; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} \ No newline at end of file diff --git a/spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/README.md b/spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/README.md deleted file mode 100644 index 38496e90a265885345a4de29fe0f50fe02a8c6c9..0000000000000000000000000000000000000000 --- a/spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 語音質檢+噪音去除:Meta Denoiser -emoji: 📉 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.22.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/DragGan/DragGan/stylegan_human/torch_utils/op_edit/__init__.py b/spaces/DragGan/DragGan/stylegan_human/torch_utils/op_edit/__init__.py deleted file mode 100644 index d2a7efe79d871852affd9de7b46f726a7942f218..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/torch_utils/op_edit/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. 
- -from .fused_act import FusedLeakyReLU, fused_leaky_relu -from .upfirdn2d import upfirdn2d diff --git a/spaces/Dragonnnext/Unicorn-proxy/README.md b/spaces/Dragonnnext/Unicorn-proxy/README.md deleted file mode 100644 index 5a6e3ea331ab5ff4b3da0715437b17f97d3c7f93..0000000000000000000000000000000000000000 --- a/spaces/Dragonnnext/Unicorn-proxy/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Unicorn OAI Proxy -emoji: 🦄 -sdk: docker -colorFrom: gray -colorTo: gray -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Duskfallcrew/duskfall-s-general-digital-art-model/app.py b/spaces/Duskfallcrew/duskfall-s-general-digital-art-model/app.py deleted file mode 100644 index 9fed0cde7af517d619b8c498061b2f76f312a53b..0000000000000000000000000000000000000000 --- a/spaces/Duskfallcrew/duskfall-s-general-digital-art-model/app.py +++ /dev/null @@ -1,137 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -from PIL import Image - -model_id = 'MultiversexPeeps/duskfall-s-general-digital-art-model' -prefix = 'gendigi' - -scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe_i2i = pipe_i2i.to("cuda") - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False): - - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - prompt = f"{prefix} {prompt}" if auto_prefix else prompt - - try: - if img is not None: - return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - -def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator): - - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe_i2i( - prompt, - negative_prompt = neg_prompt, - init_image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" -
-              Duskfall S General Digital Art Model
-              Demo for Duskfall S General Digital Art Model Stable Diffusion model.
-              If you want to donate towards costs and don't want to subscribe: https://ko-fi.com/DUSKFALLcrew / All samples and info are here: https://civitai.com/user/duskfallcrew
-              Use " gendigi " as your prefix token.
-              {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""}
-              Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space
-              Duplicate Space
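The header text above advertises the " gendigi " prefix token that this space's `inference` helper prepends to prompts before calling the pipeline. As a rough standalone sketch, not part of the original app, and assuming the `MultiversexPeeps/duskfall-s-general-digital-art-model` weights can be pulled from the Hub, the same prefix handling with plain `diffusers` could look like this (prompt text and generation parameters are illustrative):

```python
# Hedged sketch: plain-diffusers equivalent of the app's prefix handling.
# The model id and prefix come from this file; everything else is illustrative.
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

model_id = "MultiversexPeeps/duskfall-s-general-digital-art-model"
prefix = "gendigi"

scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    scheduler=scheduler,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

prompt = "portrait of a knight in ornate armor"   # user prompt (illustrative)
styled_prompt = f"{prefix} {prompt}"              # prepend the styling token, as the app does
image = pipe(styled_prompt, num_inference_steps=25, guidance_scale=7.5).images[0]
image.save("knight.png")
```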
      - """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - image_out = gr.Image(height=512) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically (gendigi)", value=prefix, visible=prefix) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False) - - inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - gr.HTML(""" -
-              This space was created using SD Space Creator.
      - """) - -demo.queue(concurrency_count=1) -demo.launch() diff --git a/spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/archs/discriminator_arch.py b/spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/archs/discriminator_arch.py deleted file mode 100644 index 4b66ab1226d6793de846bc9828bbe427031a0e2d..0000000000000000000000000000000000000000 --- a/spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/archs/discriminator_arch.py +++ /dev/null @@ -1,67 +0,0 @@ -from basicsr.utils.registry import ARCH_REGISTRY -from torch import nn as nn -from torch.nn import functional as F -from torch.nn.utils import spectral_norm - - -@ARCH_REGISTRY.register() -class UNetDiscriminatorSN(nn.Module): - """Defines a U-Net discriminator with spectral normalization (SN) - - It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. - - Arg: - num_in_ch (int): Channel number of inputs. Default: 3. - num_feat (int): Channel number of base intermediate features. Default: 64. - skip_connection (bool): Whether to use skip connections between U-Net. Default: True. - """ - - def __init__(self, num_in_ch, num_feat=64, skip_connection=True): - super(UNetDiscriminatorSN, self).__init__() - self.skip_connection = skip_connection - norm = spectral_norm - # the first convolution - self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1) - # downsample - self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False)) - self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False)) - self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False)) - # upsample - self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False)) - self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False)) - self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False)) - # extra convolutions - self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) - self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) - self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1) - - def forward(self, x): - # downsample - x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True) - x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True) - x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True) - x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True) - - # upsample - x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False) - x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x4 = x4 + x2 - x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False) - x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x5 = x5 + x1 - x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False) - x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x6 = x6 + x0 - - # extra convolutions - out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True) - out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True) - out = self.conv9(out) - - return out diff --git a/spaces/EvanMarie/faces_three/README.md b/spaces/EvanMarie/faces_three/README.md deleted file mode 100644 index ae0b1e3869ec262d7831dc02e3fbc70f4a05581c..0000000000000000000000000000000000000000 --- a/spaces/EvanMarie/faces_three/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Happy, Sad, or Angry -emoji: 😀 😥 😡 -colorFrom: 
purple -colorTo: pink -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/yolov5face/models/experimental.py b/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/yolov5face/models/experimental.py deleted file mode 100644 index 37ba4c4420789c92dc0e2aaeb3d5b64859ec728c..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/yolov5face/models/experimental.py +++ /dev/null @@ -1,45 +0,0 @@ -# # This file contains experimental modules - -import numpy as np -import torch -from torch import nn - -from facelib.detection.yolov5face.models.common import Conv - - -class CrossConv(nn.Module): - # Cross Convolution Downsample - def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): - # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, (1, k), (1, s)) - self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class MixConv2d(nn.Module): - # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): - super().__init__() - groups = len(k) - if equal_ch: # equal c_ per group - i = torch.linspace(0, groups - 1e-6, c2).floor() # c2 indices - c_ = [(i == g).sum() for g in range(groups)] # intermediate channels - else: # equal weight.numel() per group - b = [c2] + [0] * groups - a = np.eye(groups + 1, groups, k=-1) - a -= np.roll(a, 1, axis=1) - a *= np.array(k) ** 2 - a[0] = 1 - c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - - self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.LeakyReLU(0.1, inplace=True) - - def forward(self, x): - return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) diff --git "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py" "b/spaces/Fengbinbin/gpt-academic/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py" deleted file mode 100644 index e57f80f1d45bd3ec23837253848f7b32a5ccd751..0000000000000000000000000000000000000000 --- "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py" +++ /dev/null @@ -1,138 +0,0 @@ -import threading -from request_llm.bridge_all import predict_no_ui_long_connection -from toolbox import update_ui -from toolbox import CatchException, write_results_to_file, report_execption -from .crazy_utils import breakdown_txt_to_satisfy_token_limit - -def extract_code_block_carefully(txt): - splitted = txt.split('```') - n_code_block_seg = len(splitted) - 1 - if n_code_block_seg <= 1: return txt - # 剩下的情况都开头除去 ``` 结尾除去一次 ``` - txt_out = '```'.join(splitted[1:-1]) - return txt_out - - - -def break_txt_into_half_at_some_linebreak(txt): - lines = txt.split('\n') - n_lines = len(lines) - pre = lines[:(n_lines//2)] - 
post = lines[(n_lines//2):] - return "\n".join(pre), "\n".join(post) - - -@CatchException -def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port): - # 第1步:清空历史,以免输入溢出 - history = [] - - # 第2步:尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 第3步:集合文件 - import time, glob, os, shutil, re - os.makedirs('gpt_log/generated_english_version', exist_ok=True) - os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True) - file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \ - [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)] - # file_manifest = ['./toolbox.py'] - i_say_show_user_buffer = [] - - # 第4步:随便显示点什么防止卡顿的感觉 - for index, fp in enumerate(file_manifest): - # if 'test_project' in fp: continue - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - i_say_show_user =f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}' - i_say_show_user_buffer.append(i_say_show_user) - chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - - # 第5步:Token限制下的截断与处理 - MAX_TOKEN = 3000 - from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=())) - - - # 第6步:任务函数 - mutable_return = [None for _ in file_manifest] - observe_window = [[""] for _ in file_manifest] - def thread_worker(fp,index): - if index > 10: - time.sleep(60) - print('Openai 限制免费用户每分钟20次请求,降低请求频率中。') - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```' - try: - gpt_say = "" - # 分解代码文件 - file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN) - for file_content_partial in file_content_breakdown: - i_say = i_say_template(fp, file_content_partial) - # # ** gpt request ** - gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index]) - gpt_say_partial = extract_code_block_carefully(gpt_say_partial) - gpt_say += gpt_say_partial - mutable_return[index] = gpt_say - except ConnectionAbortedError as token_exceed_err: - print('至少一个线程任务Token溢出而失败', e) - except Exception as e: - print('至少一个线程任务意外失败', e) - - # 第7步:所有线程同时开始执行任务函数 - handles = [threading.Thread(target=thread_worker, args=(fp,index)) for index, fp in enumerate(file_manifest)] - for h in handles: - h.daemon = True - h.start() - chatbot.append(('开始了吗?', f'多线程操作已经开始')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 第8步:循环轮询各个线程是否执行完毕 - cnt = 0 - while True: - cnt += 1 - time.sleep(0.2) - th_alive = [h.is_alive() for h in handles] - if not any(th_alive): break - # 更好的UI视觉效果 - observe_win = [] - for thread_index, alive in enumerate(th_alive): - observe_win.append("[ ..."+observe_window[thread_index][0][-60:].replace('\n','').replace('```','...').replace(' ','.').replace('
      ','.....').replace('$','.')+"... ]") - stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)] - stat_str = ''.join(stat) - chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt%10+1))) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 第9步:把结果写入文件 - for index, h in enumerate(handles): - h.join() # 这里其实不需要join了,肯定已经都结束了 - fp = file_manifest[index] - gpt_say = mutable_return[index] - i_say_show_user = i_say_show_user_buffer[index] - - where_to_relocate = f'gpt_log/generated_english_version/{fp}' - if gpt_say is not None: - with open(where_to_relocate, 'w+', encoding='utf-8') as f: - f.write(gpt_say) - else: # 失败 - shutil.copyfile(file_manifest[index], where_to_relocate) - chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}')) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - time.sleep(1) - - # 第10步:备份一个文件 - res = write_results_to_file(history) - chatbot.append(("生成一份任务执行报告", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/spaces/GXSA/bingo/src/lib/bots/bing/sr.ts b/spaces/GXSA/bingo/src/lib/bots/bing/sr.ts deleted file mode 100644 index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/lib/bots/bing/sr.ts +++ /dev/null @@ -1,106 +0,0 @@ -// @ts-ignore -const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? ( - // @ts-ignore - window.SpeechRecognition || - window.webkitSpeechRecognition || - // @ts-ignore - window.mozSpeechRecognition || - // @ts-ignore - window.msSpeechRecognition || - // @ts-ignore - window.oSpeechRecognition -) as typeof webkitSpeechRecognition : undefined - -type subscriber = (msg: string, command?: string) => void - -export class SR { - recognition?: SpeechRecognition - onchange?: subscriber - transcript: boolean = false - listening: boolean = false - private commandsRe?: RegExp - constructor(commands: string[]) { - this.recognition = SpeechRecognitionPolyfill ? 
new SpeechRecognitionPolyfill() : undefined - if (!this.recognition) { - return - } - this.configuration('zh-CN') - if (commands.length) { - this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`) - } - this.recognition.onresult = this.speechRecognition - this.recognition.onerror = (err) => { - console.log('err', err.error) - this.stop() - } - this.recognition.onend = () => { - if (this.recognition && this.listening) { - this.recognition.start() - } - } - } - - speechRecognition = (event: SpeechRecognitionEvent) => { - if (!this.listening) return - for (var i = event.resultIndex; i < event.results.length; i++) { - let result = event.results[i] - if (result.isFinal) { - var alt = result[0] - const text = alt.transcript.trim() - if (this.commandsRe && this.commandsRe.test(text)) { - return this.onchange?.('', RegExp.$1) - } - if (!this.transcript) return - this.onchange?.(text) - } - } - } - - private configuration = async (lang: string = 'zh-CN') => { - return new Promise((resolve) => { - if (this.recognition) { - this.recognition.continuous = true - this.recognition.lang = lang - this.recognition.onstart = resolve - } - }) - } - - start = async () => { - if (this.recognition && !this.listening) { - await this.recognition.start() - this.transcript = true - this.listening = true - } - } - - stop = () => { - if (this.recognition) { - this.recognition.stop() - this.transcript = false - this.listening = false - } - } - - - pause = () => { - if (this.recognition) { - this.transcript = false - } - } - - resume = () => { - if (this.recognition) { - this.transcript = true - } - } - - abort = () => { - if (this.recognition && this.transcript) { - this.recognition.abort() - this.transcript = false - this.listening = false - } - } -} - diff --git a/spaces/Gaofish/AI_bing/README.md b/spaces/Gaofish/AI_bing/README.md deleted file mode 100644 index b3750b18ce066814089a9e7fe8b3350cc84090c6..0000000000000000000000000000000000000000 --- a/spaces/Gaofish/AI_bing/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AI Bing -emoji: 🐠 -colorFrom: yellow -colorTo: indigo -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/interface.py b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/interface.py deleted file mode 100644 index cebf9fc96b819b6133d2bfb8ba9b1796397c7454..0000000000000000000000000000000000000000 --- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/interface.py +++ /dev/null @@ -1,206 +0,0 @@ -import cv2 -import numpy as np -from PIL import Image -import glob -import os -from basicsr.archs.rrdbnet_arch import RRDBNet -from basicsr.utils.download_util import load_file_from_url - -from realesrgan import RealESRGANer -from realesrgan.archs.srvgg_arch import SRVGGNetCompact - - -def realEsrgan( - model_name="RealESRGAN_x4plus_anime_6B", - model_path=None, - input_dir="inputs", - output_dir="results", - denoise_strength=0.5, - outscale=4, - suffix="out", - tile=200, - tile_pad=10, - pre_pad=0, - face_enhance=True, - alpha_upsampler="realsrgan", - out_ext="auto", - fp32=True, - gpu_id=None, -): - - # determine models according to model names - model_name = model_name.split(".")[0] - if model_name == "RealESRGAN_x4plus": # x4 RRDBNet model - model = RRDBNet( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_block=23, - num_grow_ch=32, - scale=4, - ) - netscale = 4 - file_url = [ - 
"https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth" - ] - elif model_name == "RealESRNet_x4plus": # x4 RRDBNet model - model = RRDBNet( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_block=23, - num_grow_ch=32, - scale=4, - ) - netscale = 4 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth" - ] - elif model_name == "RealESRGAN_x4plus_anime_6B": # x4 RRDBNet model with 6 blocks - model = RRDBNet( - num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4 - ) - netscale = 4 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth" - ] - elif model_name == "RealESRGAN_x2plus": # x2 RRDBNet model - model = RRDBNet( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_block=23, - num_grow_ch=32, - scale=2, - ) - netscale = 2 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth" - ] - elif model_name == "realesr-animevideov3": # x4 VGG-style model (XS size) - model = SRVGGNetCompact( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_conv=16, - upscale=4, - act_type="prelu", - ) - netscale = 4 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth" - ] - elif model_name == "realesr-general-x4v3": # x4 VGG-style model (S size) - model = SRVGGNetCompact( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_conv=32, - upscale=4, - act_type="prelu", - ) - netscale = 4 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth", - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth", - ] - - # determine model paths - if model_path is None: - model_path = os.path.join("weights", model_name + ".pth") - if not os.path.isfile(model_path): - ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - for url in file_url: - # model_path will be updated - model_path = load_file_from_url( - url=url, - model_dir=os.path.join(ROOT_DIR, "weights"), - progress=True, - file_name=None, - ) - - # use dni to control the denoise strength - dni_weight = None - if model_name == "realesr-general-x4v3" and denoise_strength != 1: - wdn_model_path = model_path.replace( - "realesr-general-x4v3", "realesr-general-wdn-x4v3" - ) - model_path = [model_path, wdn_model_path] - dni_weight = [denoise_strength, 1 - denoise_strength] - - # restorer - upsampler = RealESRGANer( - scale=netscale, - model_path=model_path, - dni_weight=dni_weight, - model=model, - tile=tile, - tile_pad=tile_pad, - pre_pad=pre_pad, - half=not fp32, - gpu_id=gpu_id, - ) - - if face_enhance: # Use GFPGAN for face enhancement - from gfpgan import GFPGANer - - face_enhancer = GFPGANer( - model_path="https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth", - upscale=outscale, - arch="clean", - channel_multiplier=2, - bg_upsampler=upsampler, - ) - os.makedirs(output_dir, exist_ok=True) - - if not isinstance(input_dir, list): - paths = [input_dir] - else: - paths = sorted(glob.glob(os.path.join(input_dir, "*"))) - - Imgs = [] - for idx, path in enumerate(paths): - print(f"Scaling x{outscale}:", path) - if isinstance(path, Image.Image): - img = path - img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) - imgname = f"img_{idx}" - else: - imgname, extension = os.path.splitext(os.path.basename(path)) - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) - if len(img.shape) == 3 and 
img.shape[2] == 4: - img_mode = "RGBA" - else: - img_mode = None - - try: - if face_enhance: - _, _, output = face_enhancer.enhance( - img, has_aligned=False, only_center_face=False, paste_back=True - ) - else: - output, _ = upsampler.enhance(img, outscale=outscale) - except RuntimeError as error: - print("Error", error) - print( - "If you encounter CUDA or RAM out of memory, try to set --tile with a smaller number." - ) - else: - # if out_ext == "auto": - # extension = extension[1:] - # else: - # extension = out_ext - # if img_mode == "RGBA": # RGBA images should be saved in png format - # extension = "png" - # if suffix == "": - # save_path = os.path.join(output_dir, f"{imgname}.{extension}") - # else: - # save_path = os.path.join(output_dir, f"{imgname}_{suffix}.{extension}") - # - # cv2.imwrite(save_path, output) - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = Image.fromarray(img) - Imgs.append(img) - - return Imgs diff --git a/spaces/Gradio-Blocks/Pipeline-Tester/README.md b/spaces/Gradio-Blocks/Pipeline-Tester/README.md deleted file mode 100644 index 021dd2a4529ff4161f72d7977cab03bbcab9907f..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/Pipeline-Tester/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Pipeline Tester -emoji: 🏃 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.0.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/styleclip/styleclip_global.py b/spaces/Gradio-Blocks/StyleGAN-NADA/styleclip/styleclip_global.py deleted file mode 100644 index 96fa7569ebd51a5e6c2deddb57ccceb4f4376904..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/StyleGAN-NADA/styleclip/styleclip_global.py +++ /dev/null @@ -1,181 +0,0 @@ -''' -Code adapted from Stitch it in Time by Tzaban et al. 
-https://github.com/rotemtzaban/STIT -''' - - -import numpy as np -import torch -from tqdm import tqdm -from pathlib import Path -import os - -import clip - -imagenet_templates = [ - 'a bad photo of a {}.', - 'a photo of many {}.', - 'a sculpture of a {}.', - 'a photo of the hard to see {}.', - 'a low resolution photo of the {}.', - 'a rendering of a {}.', - 'graffiti of a {}.', - 'a bad photo of the {}.', - 'a cropped photo of the {}.', - 'a tattoo of a {}.', - 'the embroidered {}.', - 'a photo of a hard to see {}.', - 'a bright photo of a {}.', - 'a photo of a clean {}.', - 'a photo of a dirty {}.', - 'a dark photo of the {}.', - 'a drawing of a {}.', - 'a photo of my {}.', - 'the plastic {}.', - 'a photo of the cool {}.', - 'a close-up photo of a {}.', - 'a black and white photo of the {}.', - 'a painting of the {}.', - 'a painting of a {}.', - 'a pixelated photo of the {}.', - 'a sculpture of the {}.', - 'a bright photo of the {}.', - 'a cropped photo of a {}.', - 'a plastic {}.', - 'a photo of the dirty {}.', - 'a jpeg corrupted photo of a {}.', - 'a blurry photo of the {}.', - 'a photo of the {}.', - 'a good photo of the {}.', - 'a rendering of the {}.', - 'a {} in a video game.', - 'a photo of one {}.', - 'a doodle of a {}.', - 'a close-up photo of the {}.', - 'a photo of a {}.', - 'the origami {}.', - 'the {} in a video game.', - 'a sketch of a {}.', - 'a doodle of the {}.', - 'a origami {}.', - 'a low resolution photo of a {}.', - 'the toy {}.', - 'a rendition of the {}.', - 'a photo of the clean {}.', - 'a photo of a large {}.', - 'a rendition of a {}.', - 'a photo of a nice {}.', - 'a photo of a weird {}.', - 'a blurry photo of a {}.', - 'a cartoon {}.', - 'art of a {}.', - 'a sketch of the {}.', - 'a embroidered {}.', - 'a pixelated photo of a {}.', - 'itap of the {}.', - 'a jpeg corrupted photo of the {}.', - 'a good photo of a {}.', - 'a plushie {}.', - 'a photo of the nice {}.', - 'a photo of the small {}.', - 'a photo of the weird {}.', - 'the cartoon {}.', - 'art of the {}.', - 'a drawing of the {}.', - 'a photo of the large {}.', - 'a black and white photo of a {}.', - 'the plushie {}.', - 'a dark photo of a {}.', - 'itap of a {}.', - 'graffiti of the {}.', - 'a toy {}.', - 'itap of my {}.', - 'a photo of a cool {}.', - 'a photo of a small {}.', - 'a tattoo of the {}.', -] - -CONV_CODE_INDICES = [(0, 512), (1024, 1536), (1536, 2048), (2560, 3072), (3072, 3584), (4096, 4608), (4608, 5120), (5632, 6144), (6144, 6656), (7168, 7680), (7680, 7936), (8192, 8448), (8448, 8576), (8704, 8832), (8832, 8896), (8960, 9024), (9024, 9056)] -FFHQ_CODE_INDICES = [(0, 512), (512, 1024), (1024, 1536), (1536, 2048), (2560, 3072), (3072, 3584), (4096, 4608), (4608, 5120), (5632, 6144), (6144, 6656), (7168, 7680), (7680, 7936), (8192, 8448), (8448, 8576), (8704, 8832), (8832, 8896), (8960, 9024), (9024, 9056)] + \ - [(2048, 2560), (3584, 4096), (5120, 5632), (6656, 7168), (7936, 8192), (8576, 8704), (8896, 8960), (9056, 9088)] - -def zeroshot_classifier(model, classnames, templates, device): - - with torch.no_grad(): - zeroshot_weights = [] - for classname in tqdm(classnames): - texts = [template.format(classname) for template in templates] # format with class - texts = clip.tokenize(texts).to(device) # tokenize - class_embeddings = model.encode_text(texts) # embed with text encoder - class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True) - class_embedding = class_embeddings.mean(dim=0) - class_embedding /= class_embedding.norm() - zeroshot_weights.append(class_embedding) - 
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device) - return zeroshot_weights - -def expand_to_full_dim(partial_tensor): - full_dim_tensor = torch.zeros(size=(1, 9088)) - - start_idx = 0 - for conv_start, conv_end in CONV_CODE_INDICES: - length = conv_end - conv_start - full_dim_tensor[:, conv_start:conv_end] = partial_tensor[start_idx:start_idx + length] - start_idx += length - - return full_dim_tensor - -def get_direction(neutral_class, target_class, beta, di, clip_model=None): - - device = "cuda" if torch.cuda.is_available() else "cpu" - - if clip_model is None: - clip_model, _ = clip.load("ViT-B/32", device=device) - - class_names = [neutral_class, target_class] - class_weights = zeroshot_classifier(clip_model, class_names, imagenet_templates, device) - - dt = class_weights[:, 1] - class_weights[:, 0] - dt = dt / dt.norm() - - dt = dt.float() - di = di.float() - - relevance = di @ dt - mask = relevance.abs() > beta - direction = relevance * mask - direction_max = direction.abs().max() - if direction_max > 0: - direction = direction / direction_max - else: - raise ValueError(f'Beta value {beta} is too high for mapping from {neutral_class} to {target_class},' - f' try setting it to a lower value') - return direction - -def style_tensor_to_style_dict(style_tensor, refernce_generator): - style_layers = refernce_generator.modulation_layers - - style_dict = {} - for layer_idx, layer in enumerate(style_layers): - style_dict[layer] = style_tensor[:, FFHQ_CODE_INDICES[layer_idx][0]:FFHQ_CODE_INDICES[layer_idx][1]] - - return style_dict - -def style_dict_to_style_tensor(style_dict, reference_generator): - style_layers = reference_generator.modulation_layers - - style_tensor = torch.zeros(size=(1, 9088)) - for layer in style_dict: - layer_idx = style_layers.index(layer) - style_tensor[:, FFHQ_CODE_INDICES[layer_idx][0]:FFHQ_CODE_INDICES[layer_idx][1]] = style_dict[layer] - - return style_tensor - -def project_code_with_styleclip(source_latent, source_class, target_class, alpha, beta, reference_generator, di, clip_model=None): - edit_direction = get_direction(source_class, target_class, beta, di, clip_model) - - edit_full_dim = expand_to_full_dim(edit_direction) - - source_s = style_dict_to_style_tensor(source_latent, reference_generator) - - return source_s + alpha * edit_full_dim \ No newline at end of file diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py deleted file mode 100644 index dbe88770ae5dffbed5229ed4a4e62f10b1c8d12b..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' -# model settings -conv_cfg = dict(type='ConvWS') -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - pretrained='open-mmlab://jhu/resnext101_32x4d_gn_ws', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - style='pytorch', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg)) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py deleted file mode 100644 index 
1cac759ab66323cf034f21a9afff770f79c10035..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py' -model = dict( - pretrained='open-mmlab://res2net101_v1d_26w_4s', - backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py deleted file mode 100644 index e01a9eff6197fb80e3a541910c9b94c00510323e..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py +++ /dev/null @@ -1,140 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_mask_rcnn_swin_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - ape=False, - drop_path_rate=0.2, - patch_norm=True, - use_checkpoint=False - ), - neck=dict(in_channels=[96, 192, 384, 768]), - roi_head=dict( - bbox_head=[ - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) - ])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='AutoAugment', - policies=[ - [ - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), 
(576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict(type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.)})) -lr_config = dict(step=[27, 33]) -runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py deleted file mode 100644 index 032dc8b6219421698c3a1bfb4bca5addfeea1ab3..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_480x480_80k_pascal_context_59.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 0b0207b3144460d25229e3ac4c4d0d9fc1d34292..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/inverse_sqrt_lr_scheduler.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/inverse_sqrt_lr_scheduler.py deleted file mode 100644 index 920192e8842c5635bf6f7f76618fa4a6f4b0114a..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/inverse_sqrt_lr_scheduler.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
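The file whose header appears just above, `inverse_sqrt_lr_scheduler.py`, implements a linear warmup to the base learning rate over `warmup_steps`, followed by decay proportional to `1/sqrt(step)`. The sketch below is only an illustration of that formula, with a made-up function name, not part of the audiocraft code:

```python
# Illustration only: the warmup + inverse-square-root decay that the
# scheduler class below applies at each optimizer step.
def inverse_sqrt_lr(step: int, base_lr: float, warmup_steps: int, warmup_init_lr: float = 0.0) -> float:
    if step < warmup_steps:
        # linear warmup from warmup_init_lr up to base_lr
        return warmup_init_lr + step * (base_lr - warmup_init_lr) / warmup_steps
    # after warmup: base_lr * sqrt(warmup_steps) / sqrt(step)
    return base_lr * warmup_steps ** 0.5 * step ** -0.5

print(inverse_sqrt_lr(step=500, base_lr=1e-3, warmup_steps=1000))   # 5e-4, halfway through warmup
print(inverse_sqrt_lr(step=4000, base_lr=1e-3, warmup_steps=1000))  # 1e-3 * sqrt(1000/4000) = 5e-4
```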
- -import typing as tp - -from torch.optim import Optimizer -from torch.optim.lr_scheduler import _LRScheduler - - -class InverseSquareRootLRScheduler(_LRScheduler): - """Inverse square root LR scheduler. - - Args: - optimizer (Optimizer): Torch optimizer. - warmup_steps (int): Number of warmup steps. - warmup_init_lr (tp.Optional[float]): Initial learning rate - during warmup phase. When not set, use the provided learning rate. - """ - def __init__(self, optimizer: Optimizer, warmup_steps: int, warmup_init_lr: tp.Optional[float] = 0): - self.warmup_steps = warmup_steps - self.warmup_init_lr = warmup_init_lr - super().__init__(optimizer) - - def _get_sched_lr(self, lr: float, step: int): - if step < self.warmup_steps: - warmup_init_lr = self.warmup_init_lr or 0 - lr_step = (lr - warmup_init_lr) / self.warmup_steps - lr = warmup_init_lr + step * lr_step - else: - decay_factor = lr * self.warmup_steps**0.5 - lr = decay_factor * step**-0.5 - return lr - - def get_lr(self): - return [self._get_sched_lr(base_lr, self._step_count) for base_lr in self.base_lrs] diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/models/lm.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/models/lm.py deleted file mode 100644 index c8aad8f06797eef3293605056e1de14d07c56c2a..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/models/lm.py +++ /dev/null @@ -1,527 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass -from functools import partial -import logging -import math -import typing as tp - -import torch -from torch import nn - -from ..utils import utils -from ..modules.streaming import StreamingModule, State -from ..modules.transformer import StreamingTransformer, create_norm_fn -from ..modules.conditioners import ( - ConditionFuser, - ClassifierFreeGuidanceDropout, - AttributeDropout, - ConditioningProvider, - ConditioningAttributes, - ConditionType, -) -from ..modules.codebooks_patterns import CodebooksPatternProvider -from ..modules.activations import get_activation_fn - - -logger = logging.getLogger(__name__) -ConditionTensors = tp.Dict[str, ConditionType] -CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]] - - -def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None): - """LM layer initialization. - Inspired from xlformers: https://github.com/fairinternal/xlformers - - Args: - method (str): Method name for init function. Valid options are: - 'gaussian', 'uniform'. - input_dim (int): Input dimension of the initialized module. - init_depth (Optional[int]): Optional init depth value used to rescale - the standard deviation if defined. 
- """ - # Compute std - std = 1 / math.sqrt(input_dim) - # Rescale with depth - if init_depth is not None: - std = std / math.sqrt(2 * init_depth) - - if method == 'gaussian': - return partial( - torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std - ) - elif method == 'uniform': - bound = math.sqrt(3) * std # ensure the standard deviation is `std` - return partial(torch.nn.init.uniform_, a=-bound, b=bound) - else: - raise ValueError("Unsupported layer initialization method") - - -def init_layer(m: nn.Module, - method: str, - init_depth: tp.Optional[int] = None, - zero_bias_init: bool = False): - """Wrapper around ``get_init_fn`` for proper initialization of LM modules. - - Args: - m (nn.Module): Module to initialize. - method (str): Method name for the init function. - init_depth (Optional[int]): Optional init depth value used to rescale - the standard deviation if defined. - zero_bias_init (bool): Whether to initialize the bias to 0 or not. - """ - if isinstance(m, nn.Linear): - init_fn = get_init_fn(method, m.in_features, init_depth=init_depth) - if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: - weight = m.weight.float() - init_fn(weight) - m.weight.data[:] = weight.half() - else: - init_fn(m.weight) - if zero_bias_init and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Embedding): - init_fn = get_init_fn(method, m.embedding_dim, init_depth=None) - if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: - weight = m.weight.float() - init_fn(weight) - m.weight.data[:] = weight.half() - else: - init_fn(m.weight) - - -class ScaledEmbedding(nn.Embedding): - """Boost learning rate for embeddings (with `scale`). - """ - def __init__(self, *args, lr=None, **kwargs): - super().__init__(*args, **kwargs) - self.lr = lr - - def make_optim_group(self): - group = {"params": list(self.parameters())} - if self.lr is not None: - group["lr"] = self.lr - return group - - -@dataclass -class LMOutput: - # The logits are already re-aligned with the input codes - # hence no extra shift is required, e.g. when computing CE - logits: torch.Tensor # [B, K, T, card] - mask: torch.Tensor # [B, K, T] - - -class LMModel(StreamingModule): - """Transformer-based language model on multiple streams of codes. - - Args: - pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving. - condition_provider (MusicConditioningProvider): Conditioning provider from metadata. - fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input. - n_q (int): Number of parallel streams to model. - card (int): Cardinality, vocabulary size. - dim (int): Dimension of the transformer encoder. - num_heads (int): Number of heads for the transformer encoder. - hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder. - norm (str): Normalization method. - norm_first (bool): Use pre-norm instead of post-norm. - emb_lr (Optional[float]): Embedding-specific learning rate. - bias_proj (bool): Use bias for output projections. - weight_init (Optional[str]): Method for weight initialization. - depthwise_init (Optional[str]): Method for depthwise weight initialization. - zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros. - cfg_dropout (float): Classifier-free guidance dropout. - cfg_coef (float): Classifier-free guidance coefficient. - attribute_dropout (dict): Attribute dropout probabilities. 
- two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps. - **kwargs: Additional parameters for the transformer encoder. - """ - def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider, - fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8, - hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False, - emb_lr: tp.Optional[float] = None, bias_proj: bool = True, - weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None, - zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0, - attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False, - **kwargs): - super().__init__() - self.cfg_coef = cfg_coef - self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout) - self.att_dropout = AttributeDropout(p=attribute_dropout) - self.condition_provider = condition_provider - self.fuser = fuser - self.card = card - embed_dim = self.card + 1 - self.n_q = n_q - self.dim = dim - self.pattern_provider = pattern_provider - self.two_step_cfg = two_step_cfg - self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)]) - if 'activation' in kwargs: - kwargs['activation'] = get_activation_fn(kwargs['activation']) - self.transformer = StreamingTransformer( - d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim), - norm=norm, norm_first=norm_first, **kwargs) - self.out_norm: tp.Optional[nn.Module] = None - if norm_first: - self.out_norm = create_norm_fn(norm, dim) - self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)]) - self._init_weights(weight_init, depthwise_init, zero_bias_init) - self._fsdp: tp.Optional[nn.Module] - self.__dict__['_fsdp'] = None - - def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool): - """Initialization of the transformer module weights. - - Args: - weight_init (Optional[str]): Weight initialization strategy. See ``get_init_fn`` for valid options. - depthwise_init (Optional[str]): Depwthwise initialization strategy. The following options are valid: - 'current' where the depth corresponds to the current layer index or 'global' where the total number - of layer is used as depth. If not set, no depthwise initialization strategy is used. - zero_bias_init (bool): Whether to initalize bias to zero or not. - """ - assert depthwise_init is None or depthwise_init in ['current', 'global'] - assert depthwise_init is None or weight_init is not None, \ - "If 'depthwise_init' is defined, a 'weight_init' method should be provided." 
- assert not zero_bias_init or weight_init is not None, \ - "If 'zero_bias_init', a 'weight_init' method should be provided" - - if weight_init is None: - return - - for emb_layer in self.emb: - init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) - - for layer_idx, tr_layer in enumerate(self.transformer.layers): - depth = None - if depthwise_init == 'current': - depth = layer_idx + 1 - elif depthwise_init == 'global': - depth = len(self.transformer.layers) - init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init) - tr_layer.apply(init_fn) - - for linear in self.linears: - init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) - - @property - def special_token_id(self) -> int: - return self.card - - @property - def num_codebooks(self) -> int: - return self.n_q - - def forward(self, sequence: torch.Tensor, - conditions: tp.List[ConditioningAttributes], - condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor: - """Apply language model on sequence and conditions. - Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and - S the sequence steps, return the logits with shape [B, card, K, S]. - - Args: - indices (torch.Tensor): indices of the codes to model. - conditions (list[ConditioningAttributes]): conditionings to use when modeling - the given codes. Note that when evaluating multiple time with the same conditioning - you should pre-compute those and pass them as `condition_tensors`. - condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning - tensors, see `conditions`. - Returns: - torch.Tensor: Logits. - """ - B, K, S = sequence.shape - assert K == self.num_codebooks, 'Sequence shape must match the specified number of codebooks' - input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)]) - if condition_tensors is None: - assert not self._is_streaming, "Conditions tensors should be precomputed when streaming." - # apply dropout modules - conditions = self.cfg_dropout(conditions) - conditions = self.att_dropout(conditions) - tokenized = self.condition_provider.tokenize(conditions) - # encode conditions and fuse, both have a streaming cache to not recompute when generating. - condition_tensors = self.condition_provider(tokenized) - else: - assert not conditions, "Shouldn't pass both conditions and condition_tensors." - - input_, cross_attention_input = self.fuser(input_, condition_tensors) - - out = self.transformer(input_, cross_attention_src=cross_attention_input) - if self.out_norm: - out = self.out_norm(out) - logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card] - - # remove the prefix from the model outputs - if len(self.fuser.fuse2cond['prepend']) > 0: - logits = logits[:, :, -S:] - - return logits # [B, K, S, card] - - def compute_predictions( - self, codes: torch.Tensor, - conditions: tp.List[ConditioningAttributes], - condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput: - """Given an input tensor of codes [B, K, T] and list of conditions, runs the model - forward using the specified codes interleaving pattern. - - Args: - codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size, - K the number of codebooks and T the number of timesteps. - conditions (list[ConditioningAttributes]): conditionings to use when modeling - the given codes. 
Note that when evaluating multiple time with the same conditioning - you should pre-compute those and pass them as `condition_tensors`. - condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning - tensors, see `conditions`. - Returns: - LMOutput: Language model outputs - logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes, - i.e. the first item corresponds to logits to predict the first code, meaning that - no additional shifting of codes and logits is required. - mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions. - Given the specified interleaving strategies, parts of the logits and codes should - not be considered as valid predictions because of invalid context. - """ - B, K, T = codes.shape - codes = codes.contiguous() - # map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens - pattern = self.pattern_provider.get_pattern(T) - sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence( - codes, self.special_token_id, keep_only_valid_steps=True - ) - # apply model on pattern sequence - model = self if self._fsdp is None else self._fsdp - logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card] - # map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card] - # and provide the corresponding mask over invalid positions of tokens - logits = logits.permute(0, 3, 1, 2) # [B, card, K, S] - # note: we use nans as special token to make it obvious if we feed unexpected logits - logits, logits_indexes, logits_mask = pattern.revert_pattern_logits( - logits, float('nan'), keep_only_valid_steps=True - ) - logits = logits.permute(0, 2, 3, 1) # [B, K, T, card] - logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T] - return LMOutput(logits, logits_mask) - - def _sample_next_token(self, - sequence: torch.Tensor, - cfg_conditions: CFGConditions, - unconditional_state: State, - use_sampling: bool = False, - temp: float = 1.0, - top_k: int = 0, - top_p: float = 0.0, - cfg_coef: tp.Optional[float] = None) -> torch.Tensor: - """Sample next token from the model given a sequence and a set of conditions. The model supports - multiple sampling strategies (greedy sampling, softmax, top-k, top-p...). - - Args: - sequence (torch.Tensor): Current sequence of shape [B, K, S] - with K corresponding to the number of codebooks and S the number of sequence steps. - S = 1 in streaming mode, except for the first step that contains a bigger prompt. - condition_tensors (Dict[str, ConditionType): Set of conditions. If CFG is used, - should be twice the batch size, being the concatenation of the conditions + null conditions. - use_sampling (bool): Whether to use a sampling strategy or not. - temp (float): Sampling temperature. - top_k (int): K for "top-k" sampling. - top_p (float): P for "top-p" sampling. - cfg_coef (float): classifier free guidance coefficient - Returns: - next_token (torch.Tensor): Next token tensor of shape [B, K, 1]. 
- """ - B = sequence.shape[0] - cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef - model = self if self._fsdp is None else self._fsdp - if self.two_step_cfg and cfg_conditions != {}: - assert isinstance(cfg_conditions, tuple) - condition_tensors, null_condition_tensors = cfg_conditions - cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors) - state = self.get_streaming_state() - self.set_streaming_state(unconditional_state) - uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors) - unconditional_state.update(self.get_streaming_state()) - self.set_streaming_state(state) - logits = uncond_logits + (cond_logits - uncond_logits) * self.cfg_coef - else: - assert isinstance(cfg_conditions, dict) - condition_tensors = cfg_conditions - if condition_tensors: - # Preparing for CFG, predicting both conditional and unconditional logits. - sequence = torch.cat([sequence, sequence], dim=0) - all_logits = model( - sequence, - conditions=[], condition_tensors=condition_tensors) - if condition_tensors: - cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card] - logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef - else: - logits = all_logits - - logits = logits.permute(0, 1, 3, 2) # [B, K, card, T] - logits = logits[..., -1] # [B x K x card] - - # Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error. - if use_sampling and temp > 0.0: - probs = torch.softmax(logits / temp, dim=-1) - if top_p > 0.0: - next_token = utils.sample_top_p(probs, p=top_p) - elif top_k > 0: - next_token = utils.sample_top_k(probs, k=top_k) - else: - next_token = utils.multinomial(probs, num_samples=1) - else: - next_token = torch.argmax(logits, dim=-1, keepdim=True) - - return next_token - - @torch.no_grad() - def generate(self, - prompt: tp.Optional[torch.Tensor] = None, - conditions: tp.List[ConditioningAttributes] = [], - num_samples: tp.Optional[int] = None, - max_gen_len: int = 256, - use_sampling: bool = True, - temp: float = 1.0, - top_k: int = 250, - top_p: float = 0.0, - cfg_coef: tp.Optional[float] = None, - two_step_cfg: bool = False, - remove_prompts: bool = False, - check: bool = False, - callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor: - """Generate tokens sampling from the model given a prompt or unconditionally. Generation can - be perform in a greedy fashion or using sampling with top K and top P strategies. - - Args: - prompt (Optional[torch.Tensor]): Prompt tokens of shape [B, K, T]. - conditions_tensors (Dict[str, torch.Tensor]): Set of conditions or None. - num_samples (int or None): Number of samples to generate when no prompt and no conditions are given. - max_gen_len (int): Maximum generation length. - use_sampling (bool): Whether to use a sampling strategy or not. - temp (float): Sampling temperature. - top_k (int): K for "top-k" sampling. - top_p (float): P for "top-p" sampling. - remove_prompts (bool): Whether to remove prompts from generation or not. - Returns: - torch.Tensor: Generated tokens. - """ - assert not self.training, "generation shouldn't be used in training mode." - first_param = next(iter(self.parameters())) - device = first_param.device - - # Checking all input shapes are consistents. 
- possible_num_samples = [] - if num_samples is not None: - possible_num_samples.append(num_samples) - elif prompt is not None: - possible_num_samples.append(prompt.shape[0]) - elif conditions: - possible_num_samples.append(len(conditions)) - else: - possible_num_samples.append(1) - assert [x == possible_num_samples[0] for x in possible_num_samples], "Inconsitent inputs shapes" - num_samples = possible_num_samples[0] - - # below we create set of conditions: one conditional and one unconditional - # to do that we merge the regular condition together with the null condition - # we then do 1 forward pass instead of 2. - # the reason for that is two-fold: - # 1. it is about x2 faster than doing 2 forward passes - # 2. avoid the streaming API treating the 2 passes as part of different time steps - # We also support doing two different passes, in particular to ensure that - # the padding structure is exactly the same between train anf test. - # With a batch size of 1, this can be slower though. - cfg_conditions: CFGConditions - two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg - if conditions: - null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions) - if two_step_cfg: - cfg_conditions = ( - self.condition_provider(self.condition_provider.tokenize(conditions)), - self.condition_provider(self.condition_provider.tokenize(null_conditions)), - ) - else: - conditions = conditions + null_conditions - tokenized = self.condition_provider.tokenize(conditions) - cfg_conditions = self.condition_provider(tokenized) - else: - cfg_conditions = {} - - if prompt is None: - assert num_samples > 0 - prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device) - - B, K, T = prompt.shape - start_offset = T - assert start_offset < max_gen_len - - pattern = self.pattern_provider.get_pattern(max_gen_len) - # this token is used as default value for codes that are not generated yet - unknown_token = -1 - - # we generate codes up to the max_gen_len that will be mapped to the pattern sequence - gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device) - # filling the gen_codes with the prompt if needed - gen_codes[..., :start_offset] = prompt - # create the gen_sequence with proper interleaving from the pattern: [B, K, S] - gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id) - # retrieve the start_offset in the sequence: - # it is the first sequence step that contains the `start_offset` timestep - start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset) - assert start_offset_sequence is not None - - with self.streaming(): - unconditional_state = self.get_streaming_state() - prev_offset = 0 - gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S] - for offset in range(start_offset_sequence, gen_sequence_len): - # get current sequence (note that the streaming API is providing the caching over previous offsets) - curr_sequence = gen_sequence[..., prev_offset:offset] - curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1) - if check: - # check coherence between mask and sequence - assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all() - # should never happen as gen_sequence is filled progressively - assert not (curr_sequence == unknown_token).any() - # sample next token from the model, next token shape is [B, K, 1] - next_token = self._sample_next_token( - curr_sequence, cfg_conditions, 
unconditional_state, use_sampling, temp, top_k, top_p, - cfg_coef=cfg_coef) - # ensure the tokens that should be masked are properly set to special_token_id - # as the model never output special_token_id - valid_mask = mask[..., offset:offset+1].expand(B, -1, -1) - next_token[~valid_mask] = self.special_token_id - # ensure we don't overwrite prompt tokens, we only write over unknown tokens - # (then mask tokens should be left as is as well, which is correct) - gen_sequence[..., offset:offset+1] = torch.where( - gen_sequence[..., offset:offset+1] == unknown_token, - next_token, gen_sequence[..., offset:offset+1] - ) - prev_offset = offset - if callback is not None: - callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence) - unconditional_state.clear() - - # ensure sequence has been entirely filled - assert not (gen_sequence == unknown_token).any() - # ensure gen_sequence pattern and mask are matching - # which means the gen_sequence is valid according to the pattern - assert ( - gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id) - ).all() - # get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps - out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token) - - # sanity checks over the returned codes and corresponding masks - assert (out_codes[..., :max_gen_len] != unknown_token).all() - assert (out_mask[..., :max_gen_len] == 1).all() - - out_start_offset = start_offset if remove_prompts else 0 - out_codes = out_codes[..., out_start_offset:max_gen_len] - - # ensure the returned codes are all valid - assert (out_codes >= 0).all() and (out_codes <= self.card).all() - return out_codes diff --git a/spaces/GuiltySpark/amikus_text_summarizer/app.py b/spaces/GuiltySpark/amikus_text_summarizer/app.py deleted file mode 100644 index f815824155621a004fe096d0f4664cad11da11d0..0000000000000000000000000000000000000000 --- a/spaces/GuiltySpark/amikus_text_summarizer/app.py +++ /dev/null @@ -1,69 +0,0 @@ -# https://huggingface.co/tuner007/pegasus_paraphrase - -import torch -from transformers import PegasusForConditionalGeneration, PegasusTokenizer - -model_name = 'tuner007/pegasus_paraphrase' -torch_device = 'cuda' if torch.cuda.is_available() else 'cpu' -tokenizer = PegasusTokenizer.from_pretrained(model_name) -model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device) - -def get_response(input_text,num_return_sequences): - batch = tokenizer.prepare_seq2seq_batch([input_text],truncation=True,padding='longest',max_length=60, return_tensors="pt").to(torch_device) - translated = model.generate(**batch,max_length=60,num_beams=10, num_return_sequences=num_return_sequences, temperature=1.5) - tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True) - return tgt_text - - - - -from sentence_splitter import SentenceSplitter, split_text_into_sentences - -splitter = SentenceSplitter(language='en') - -def paraphraze(text): - sentence_list = splitter.split(text) - paraphrase = [] - - for i in sentence_list: - a = get_response(i,1) - paraphrase.append(a) - paraphrase2 = [' '.join(x) for x in paraphrase] - paraphrase3 = [' '.join(x for x in paraphrase2) ] - paraphrased_text = str(paraphrase3).strip('[]').strip("'") - return paraphrased_text - - -#python3 -#build a text summarizer using hugging face and gradio -#https://pypi.org/project/gradio/ -#https://huggingface.co/transformers/ -import gradio as gr -import 
transformers -from transformers import pipeline -import yake - - - -summarizer = pipeline("summarization") - -kw_extractor = yake.KeywordExtractor() -language = "en" -max_ngram_size = 3 -deduplication_threshold = 0.9 -numOfKeywords = 20 -custom_kw_extractor = yake.KeywordExtractor(lan=language, n=max_ngram_size, dedupLim=deduplication_threshold, top=numOfKeywords, features=None) - -def summarize(text): - keywords_2 = [] - summ = summarizer(text, max_length=200, min_length=30)[0]['summary_text'] - keywords = custom_kw_extractor.extract_keywords(text) - for i in range(len(keywords)): - keywords_2.append(keywords[i][0]) - - paraphrased_text = paraphraze(text) - return [summ,keywords_2,paraphrased_text] -gr.Interface(fn=summarize, inputs=gr.inputs.Textbox(lines=7, placeholder="Enter text here"), outputs=[gr.outputs.Textbox(label="Summary"),gr.outputs.Textbox(label="keywords"),gr.outputs.Textbox(label="Paraphrased Text")],examples=[["Simultaneous localization and mapping (SLAM) is the computational problem of constructing or updating a map of an unknown environment while simultaneously keeping track of an agent's location within it. While this initially appears to be a chicken-and-egg problem there are several algorithms known for solving it, at least approximately, in tractable time for certain environments. Popular approximate solution methods include the particle filter, extended Kalman filter, covariance intersection, and GraphSLAM. SLAM algorithms are based on concepts in computational geometry and computer vision, and are used in robot navigation, robotic mapping and odometry for virtual reality or augmented reality.SLAM algorithms are tailored to the available resources, hence not aimed at perfection, but at operational compliance. Published approaches are employed in self-driving cars, unmanned aerial vehicles, autonomous underwater vehicles, planetary rovers, newer domestic robots and even inside the human body. In December 2021, Disney received a patent on augmented reality technology based on SLAM techniques with an array of external projectors, so that AR-enabled headsets or smartphones are not required."], -["Cleopatra VII Philopator was queen of the Ptolemaic Kingdom of Egypt from 51 to 30 BC, and its last active ruler.A member of the Ptolemaic dynasty, she was a descendant of its founder Ptolemy I Soter, a Macedonian Greek general and companion of Alexander the Great.After the death of Cleopatra, Egypt became a province of the Roman Empire, marking the end of the second to last Hellenistic state and the age that had lasted since the reign of Alexander (336–323 BC).Her native language was Koine Greek, and she was the only Ptolemaic ruler to learn the Egyptian language.In 58 BC, Cleopatra presumably accompanied her father, Ptolemy XII Auletes, during his exile to Rome after a revolt in Egypt (a Roman client state) allowed his rival daughter Berenice IV to claim his throne. Berenice was killed in 55 BC when Ptolemy returned to Egypt with Roman military assistance. When he died in 51 BC, the joint reign of Cleopatra and her brother Ptolemy XIII began, but a falling-out between them led to open civil war. After losing the 48 BC Battle of Pharsalus in Greece against his rival Julius Caesar (a Roman dictator and consul) in Caesar's Civil War, the Roman statesman Pompey fled to Egypt. Pompey had been a political ally of Ptolemy XII, but Ptolemy XIII, at the urging of his court eunuchs, had Pompey ambushed and killed before Caesar arrived and occupied Alexandria. 
Caesar then attempted to reconcile the rival Ptolemaic siblings, but Ptolemy's chief adviser, Potheinos, viewed Caesar's terms as favoring Cleopatra, so his forces besieged her and Caesar at the palace. Shortly after the siege was lifted by reinforcements, Ptolemy XIII died in the 47 BC Battle of the Nile; Cleopatra's half-sister Arsinoe IV was eventually exiled to Ephesus for her role in carrying out the siege. Caesar declared Cleopatra and her brother Ptolemy XIV joint rulers but maintained a private affair with Cleopatra that produced a son, Caesarion. Cleopatra traveled to Rome as a client queen in 46 and 44 BC, where she stayed at Caesar's villa. After the assassinations of Caesar and (on her orders) Ptolemy XIV in 44 BC, she named Caesarion co-ruler as Ptolemy XV."], -["A black hole is a region of spacetime where gravity is so strong that nothing — no particles or even electromagnetic radiation such as light — can escape from it. The theory of general relativity predicts that a sufficiently compact mass can deform spacetime to form a black hole.The boundary of no escape is called the event horizon. Although it has an enormous effect on the fate and circumstances of an object crossing it, it has no locally detectable features according to general relativity.In many ways, a black hole acts like an ideal black body, as it reflects no light. Moreover, quantum field theory in curved spacetime predicts that event horizons emit Hawking radiation, with the same spectrum as a black body of a temperature inversely proportional to its mass. This temperature is of the order of billionths of a kelvin for stellar black holes, making it essentially impossible to observe directly.Objects whose gravitational fields are too strong for light to escape were first considered in the 18th century by John Michell and Pierre-Simon Laplace.In 1916, Karl Schwarzschild found the first modern solution of general relativity that would characterize a black hole. David Finkelstein, in 1958, first published the interpretation of black hole as a region of space from which nothing can escape. Black holes were long considered a mathematical curiosity; it was not until the 1960s that theoretical work showed they were a generic prediction of general relativity. The discovery of neutron stars by Jocelyn Bell Burnell in 1967 sparked interest in gravitationally collapsed compact objects as a possible astrophysical reality. The first black hole known was Cygnus X-1, identified by several researchers independently in 1971." 
-]]).launch(inline=False) \ No newline at end of file diff --git a/spaces/Hallucinate/demo/taming/modules/losses/lpips.py b/spaces/Hallucinate/demo/taming/modules/losses/lpips.py deleted file mode 100644 index a7280447694ffc302a7636e7e4d6183408e0aa95..0000000000000000000000000000000000000000 --- a/spaces/Hallucinate/demo/taming/modules/losses/lpips.py +++ /dev/null @@ -1,123 +0,0 @@ -"""Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models""" - -import torch -import torch.nn as nn -from torchvision import models -from collections import namedtuple - -from taming.util import get_ckpt_path - - -class LPIPS(nn.Module): - # Learned perceptual metric - def __init__(self, use_dropout=True): - super().__init__() - self.scaling_layer = ScalingLayer() - self.chns = [64, 128, 256, 512, 512] # vg16 features - self.net = vgg16(pretrained=True, requires_grad=False) - self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) - self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) - self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) - self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) - self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) - self.load_from_pretrained() - for param in self.parameters(): - param.requires_grad = False - - def load_from_pretrained(self, name="vgg_lpips"): - ckpt = get_ckpt_path(name, "taming/modules/autoencoder/lpips") - self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) - print("loaded pretrained LPIPS loss from {}".format(ckpt)) - - @classmethod - def from_pretrained(cls, name="vgg_lpips"): - if name != "vgg_lpips": - raise NotImplementedError - model = cls() - ckpt = get_ckpt_path(name) - model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) - return model - - def forward(self, input, target): - in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target)) - outs0, outs1 = self.net(in0_input), self.net(in1_input) - feats0, feats1, diffs = {}, {}, {} - lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4] - for kk in range(len(self.chns)): - feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) - diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 - - res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))] - val = res[0] - for l in range(1, len(self.chns)): - val += res[l] - return val - - -class ScalingLayer(nn.Module): - def __init__(self): - super(ScalingLayer, self).__init__() - self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) - self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None]) - - def forward(self, inp): - return (inp - self.shift) / self.scale - - -class NetLinLayer(nn.Module): - """ A single linear layer which does a 1x1 conv """ - def __init__(self, chn_in, chn_out=1, use_dropout=False): - super(NetLinLayer, self).__init__() - layers = [nn.Dropout(), ] if (use_dropout) else [] - layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ] - self.model = nn.Sequential(*layers) - - -class vgg16(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(vgg16, self).__init__() - vgg_pretrained_features = models.vgg16(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = 
torch.nn.Sequential() - self.N_slices = 5 - for x in range(4): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(4, 9): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(9, 16): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(16, 23): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(23, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1_2 = h - h = self.slice2(h) - h_relu2_2 = h - h = self.slice3(h) - h_relu3_3 = h - h = self.slice4(h) - h_relu4_3 = h - h = self.slice5(h) - h_relu5_3 = h - vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) - out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) - return out - - -def normalize_tensor(x,eps=1e-10): - norm_factor = torch.sqrt(torch.sum(x**2,dim=1,keepdim=True)) - return x/(norm_factor+eps) - - -def spatial_average(x, keepdim=True): - return x.mean([2,3],keepdim=keepdim) - diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/new/decoders/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/new/decoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/libri_labels.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/libri_labels.py deleted file mode 100644 index 694a202604c7a4a480550550679ce6c16bd10e42..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/libri_labels.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -""" -Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset -""" - -import argparse -import os - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("tsv") - parser.add_argument("--output-dir", required=True) - parser.add_argument("--output-name", required=True) - args = parser.parse_args() - - os.makedirs(args.output_dir, exist_ok=True) - - transcriptions = {} - - with open(args.tsv, "r") as tsv, open( - os.path.join(args.output_dir, args.output_name + ".ltr"), "w" - ) as ltr_out, open( - os.path.join(args.output_dir, args.output_name + ".wrd"), "w" - ) as wrd_out: - root = next(tsv).strip() - for line in tsv: - line = line.strip() - dir = os.path.dirname(line) - if dir not in transcriptions: - parts = dir.split(os.path.sep) - trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt" - path = os.path.join(root, dir, trans_path) - assert os.path.exists(path) - texts = {} - with open(path, "r") as trans_f: - for tline in trans_f: - items = tline.strip().split() - texts[items[0]] = " ".join(items[1:]) - transcriptions[dir] = texts - part = os.path.basename(line).split(".")[0] - assert part in transcriptions[dir] - print(transcriptions[dir][part], file=wrd_out) - print( - " ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |", - file=ltr_out, - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/monotonic_align/setup.py b/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/monotonic_align/setup.py deleted file mode 100644 index 3a3892f92e3fbb866e3111199a9a4cf1f88e3959..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/monotonic_align/setup.py +++ /dev/null @@ -1,23 +0,0 @@ -import numpy -from setuptools import Extension, find_packages -from distutils.core import setup -from Cython.Build import cythonize - - -_VERSION = "1.1" - - -ext_modules = cythonize( - "monotonic_align/core.pyx", - compiler_directives={"language_level": "3"}, -) - -setup( - name="monotonic_align", - ext_modules=ext_modules, - include_dirs=[numpy.get_include(), "monotonic_align"], - packages=find_packages(), - setup_requires=["numpy", "cython"], - install_requires=["numpy"], - version=_VERSION, -) diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/t2s_fastapi.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/t2s_fastapi.py deleted file mode 100644 index e034fc01a4a5bcd54b365a49dad2e907b57504a1..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/t2s_fastapi.py +++ /dev/null @@ -1,63 +0,0 @@ -from starlette.responses import StreamingResponse -from texttospeech import MelToWav, TextToMel -from typing import Optional -from pydantic import BaseModel -from fastapi import FastAPI, HTTPException -import uvicorn -import base64 - -app = FastAPI() - - -class TextJson(BaseModel): - text: str - lang: Optional[str] = "hi" - gender: Optional[str] = "male" - - -glow_hi_male = TextToMel(glow_model_dir="", device="") -glow_hi_female = TextToMel(glow_model_dir="", device="") -hifi_hi = MelToWav(hifi_model_dir="", device="") - - -available_choice = { - "hi_male": [glow_hi_male, hifi_hi], - "hi_female": [glow_hi_female, hifi_hi], -} - - -@app.post("/TTS/") -async def tts(input: TextJson): - text = input.text - lang = input.lang - gender = input.gender - - choice = lang + "_" + gender - if choice in available_choice.keys(): - t2s = available_choice[choice] - 
else: - raise HTTPException( - status_code=400, detail={"error": "Requested model not found"} - ) - - if text: - mel = t2s[0].generate_mel(text) - data, sr = t2s[1].generate_wav(mel) - t2s.save_audio("out.wav", data, sr) - else: - raise HTTPException(status_code=400, detail={"error": "No text"}) - - ## to return outpur as a file - # audio = open('out.wav', mode='rb') - # return StreamingResponse(audio, media_type="audio/wav") - - with open("out.wav", "rb") as audio_file: - encoded_bytes = base64.b64encode(audio_file.read()) - encoded_string = encoded_bytes.decode() - return {"encoding": "base64", "data": encoded_string, "sr": sr} - - -if __name__ == "__main__": - uvicorn.run( - "t2s_fastapi:app", host="127.0.0.1", port=5000, log_level="info", reload=True - ) diff --git a/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/tokenize/indic_tokenize.py b/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/tokenize/indic_tokenize.py deleted file mode 100644 index 0c3864776382c468ff863bb6d5ef8d2180cd782f..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/tokenize/indic_tokenize.py +++ /dev/null @@ -1,111 +0,0 @@ -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -# - -#Program for tokenizing Indian language input -# -# @author Anoop Kunchukuttan -# -""" -Tokenizer for Indian languages. Currently, simple punctuation-based tokenizers -are supported (see `trivial_tokenize`). Major Indian language punctuations are -handled. -""" -import string, re, sys - -from indicnlp.common import IndicNlpException - -### tokenizer patterns -triv_tokenizer_indic_pat=re.compile(r'(['+string.punctuation+r'\u0964\u0965'+r'])') -triv_tokenizer_urdu_pat=re.compile(r'(['+string.punctuation+r'\u0609\u060A\u060C\u061E\u066A\u066B\u066C\u066D\u06D4'+r'])') - -## date, numbers, section/article numbering -pat_num_seq=re.compile(r'([0-9]+ [,.:/] )+[0-9]+') - -def trivial_tokenize_indic(text): - """tokenize string for Indian language scripts using Brahmi-derived scripts - - A trivial tokenizer which just tokenizes on the punctuation boundaries. - This also includes punctuations for the Indian language scripts (the - purna virama and the deergha virama). This is a language independent - tokenizer - - Args: - text (str): text to tokenize - - Returns: - list: list of tokens - - """ - tok_str=triv_tokenizer_indic_pat.sub(r' \1 ',text.replace('\t',' ')) -# return re.sub(r'[ ]+',' ',tok_str).strip(' ').split(' ') - - s=re.sub(r'[ ]+',' ',tok_str).strip(' ') - - # do not tokenize numbers and dates - new_s='' - prev=0 - for m in pat_num_seq.finditer(s): - start=m.start() - end=m.end() - if start>prev: - new_s=new_s+s[prev:start] - new_s=new_s+s[start:end].replace(' ','') - prev=end - - new_s=new_s+s[prev:] - s=new_s - - return s.split(' ') - -def trivial_tokenize_urdu(text): - """tokenize Urdu string - - A trivial tokenizer which just tokenizes on the punctuation boundaries. - This also includes punctuations for the Urdu script. - These punctuations characters were identified from the Unicode database - for Arabic script by looking for punctuation symbols. 
- - Args: - text (str): text to tokenize - - Returns: - list: list of tokens - """ - tok_str=triv_tokenizer_urdu_pat.sub(r' \1 ',text.replace('\t',' ')) - return re.sub(r'[ ]+',' ',tok_str).strip(' ').split(' ') - -def trivial_tokenize(text,lang='hi'): - """trivial tokenizer for Indian languages using Brahmi for Arabic scripts - - A trivial tokenizer which just tokenizes on the punctuation boundaries. - Major punctuations specific to Indian langauges are handled. - These punctuations characters were identified from the Unicode database. - - Args: - text (str): text to tokenize - lang (str): ISO 639-2 language code - - Returns: - list: list of tokens - """ - if lang=='ur': - return trivial_tokenize_urdu(text) - else: - return trivial_tokenize_indic(text) - -# if __name__ == '__main__': - -# if len(sys.argv)<4: -# print("Usage: python indic_tokenize.py ") -# sys.exit(1) - -# with open(sys.argv[1],'r', encoding='utf-8') as ifile: -# with open(sys.argv[2],'w', encoding='utf-8') as ofile: -# for line in ifile: -# tokenized_line=' '.join(trivial_tokenize(line,sys.argv[3])) -# ofile.write(tokenized_line) diff --git a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/run.sh b/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/run.sh deleted file mode 100644 index 333a437a474cc87d943c7e14d62b64f605a17c0a..0000000000000000000000000000000000000000 --- a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/run.sh +++ /dev/null @@ -1,112 +0,0 @@ -#!/usr/bin/env bash - - -python3 run_data_measurements.py --dataset="hate_speech18" --config="default" --split="train" --label_field="label" --feature="text" --overwrite_previous -python3 run_data_measurements.py --dataset="hate_speech_offensive" --config="default" --split="train" --label_field="label" --feature="tweet" --overwrite_previous - - -python3 run_data_measurements.py --dataset="imdb" --config="plain_text" --split="train" --label_field="label" --feature="text" --overwrite_previous -python3 run_data_measurements.py --dataset="imdb" --config="plain_text" --split="unsupervised" --label_field="label" --feature="text" --overwrite_previous - - -python3 run_data_measurements.py --dataset="glue" --config="cola" --split="train" --label_field="label" --feature="sentence" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="cola" --split="validation" --label_field="label" --feature="sentence" --overwrite_previous - -python3 run_data_measurements.py --dataset="glue" --config="mnli" --split="train" --label_field="label" --feature="hypothesis" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="mnli" --split="train" --label_field="label" --feature="premise" --overwrite_previous - -python3 run_data_measurements.py --dataset="glue" --config="mnli" --split="validation_matched" --label_field="label" --feature="premise" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="mnli" --split="validation_matched" --label_field="label" --feature="hypothesis" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="mnli" --split="validation_mismatched" --label_field="label" --feature="premise" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="mnli" --split="validation_mismatched" --label_field="label" --feature="hypothesis" --overwrite_previous - - -python3 run_data_measurements.py --dataset="glue" --config="mrpc" --split="train" --label_field="label" --feature="sentence1" --overwrite_previous -python3 run_data_measurements.py 
--dataset="glue" --config="mrpc" --split="train" --label_field="label" --feature="sentence2" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="mrpc" --split="validation" --label_field="label" --feature="sentence1" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="mrpc" --split="validation" --label_field="label" --feature="sentence2" --overwrite_previous - - -python3 run_data_measurements.py --dataset="glue" --config="rte" --split="train" --label_field="label" --feature="sentence1" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="rte" --split="train" --label_field="label" --feature="sentence2" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="rte" --split="validation" --label_field="label" --feature="sentence1" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="rte" --split="validation" --label_field="label" --feature="sentence2" --overwrite_previous - - -python3 run_data_measurements.py --dataset="glue" --config="stsb" --split="train" --label_field="label" --feature="sentence1" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="stsb" --split="train" --label_field="label" --feature="sentence2" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="stsb" --split="validation" --label_field="label" --feature="sentence1" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="stsb" --split="validation" --label_field="label" --feature="sentence2" --overwrite_previous - -python3 run_data_measurements.py --dataset="glue" --config="wnli" --split="train" --label_field="label" --feature="sentence1" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="wnli" --split="train" --label_field="label" --feature="sentence2" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="wnli" --split="validation" --label_field="label" --feature="sentence1" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="wnli" --split="validation" --label_field="label" --feature="sentence2" --overwrite_previous - -python3 run_data_measurements.py --dataset="glue" --config="sst2" --split="train" --label_field="label" --feature="sentence" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="sst2" --split="validation" --label_field="label" --feature="sentence" --overwrite_previous - - -python3 run_data_measurements.py --dataset="glue" --config="qnli" --split="train" --label_field="label" --feature="question" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="qnli" --split="train" --label_field="label" --feature="sentence" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="qnli" --split="validation" --label_field="label" --feature="question" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="qnli" --split="validation" --label_field="label" --feature="sentence" --overwrite_previous - - -python3 run_data_measurements.py --dataset="glue" --config="qqp" --split="train" --label_field="label" --feature="question1" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="qqp" --split="train" --label_field="label" --feature="question2" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="qqp" --split="validation" --label_field="label" 
--feature="question1" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="qqp" --split="validation" --label_field="label" --feature="question2" --overwrite_previous - -python3 run_data_measurements.py --dataset="glue" --config="mnli_matched" --split="validation" --label_field="label" --feature="hypothesis" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="mnli_matched" --split="validation" --label_field="label" --feature="premise" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="mnli_mismatched" --split="validation" --label_field="label" --feature="hypothesis" --overwrite_previous -python3 run_data_measurements.py --dataset="glue" --config="mnli_mismatched" --split="validation" --label_field="label" --feature="premise" --overwrite_previous - - -python3 run_data_measurements.py --dataset="wikitext" --config="wikitext-103-v1" --split="train" --feature="text" --overwrite_previous -python3 run_data_measurements.py --dataset="wikitext" --config="wikitext-103-raw-v1" --split="train" --feature="text" --overwrite_previous -python3 run_data_measurements.py --dataset="wikitext" --config="wikitext-2-v1" --split="train" --feature="text" --overwrite_previous -python3 run_data_measurements.py --dataset="wikitext" --config="wikitext-2-raw-v1" --split="train" --feature="text" --overwrite_previous -python3 run_data_measurements.py --dataset="wikitext" --config="wikitext-103-v1" --split="validation" --feature="text" --overwrite_previous -python3 run_data_measurements.py --dataset="wikitext" --config="wikitext-103-raw-v1" --split="validation" --feature="text" --overwrite_previous -python3 run_data_measurements.py --dataset="wikitext" --config="wikitext-2-v1" --split="validation" --feature="text" --overwrite_previous -python3 run_data_measurements.py --dataset="wikitext" --config="wikitext-2-raw-v1" --split="validation" --feature="text" --overwrite_previous - - -# Superglue wsc? wic? rte? record? multirc? 
- -python3 run_data_measurements.py --dataset="super_glue" --config="boolq" --split="train" --label_field="label" --feature="question" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="boolq" --split="validation" --label_field="label" --feature="question" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="boolq" --split="train" --label_field="label" --feature="passage" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="boolq" --split="validation" --label_field="label" --feature="passage" --overwrite_previous - -python3 run_data_measurements.py --dataset="super_glue" --config="cb" --split="train" --label_field="label" --feature="premise" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="cb" --split="validation" --label_field="label" --feature="premise" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="cb" --split="train" --label_field="label" --feature="hypothesis" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="cb" --split="validation" --label_field="label" --feature="hypothesis" --overwrite_previous - - -python3 run_data_measurements.py --dataset="super_glue" --config="copa" --split="train" --label_field="label" --feature="premise" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="copa" --split="validation" --label_field="label" --feature="premise" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="copa" --split="train" --label_field="label" --feature="choice1" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="copa" --split="validation" --label_field="label" --feature="choice1" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="copa" --split="train" --label_field="label" --feature="choice2" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="copa" --split="validation" --label_field="label" --feature="choice2" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="copa" --split="train" --label_field="label" --feature="question" --overwrite_previous -python3 run_data_measurements.py --dataset="super_glue" --config="copa" --split="validation" --label_field="label" --feature="question" --overwrite_previous - -python3 run_data_measurements.py --dataset="squad" --config="plain_text" --split="train" --feature="context" --overwrite_previous -python3 run_data_measurements.py --dataset="squad" --config="plain_text" --split="train" --feature="question" --overwrite_previous -python3 run_data_measurements.py --dataset="squad" --config="plain_text" --split="train" --feature="title" --overwrite_previous -python3 run_data_measurements.py --dataset="squad" --config="plain_text" --split="validation" --feature="context" --overwrite_previous -python3 run_data_measurements.py --dataset="squad" --config="plain_text" --split="validation" --feature="question" --overwrite_previous -python3 run_data_measurements.py --dataset="squad" --config="plain_text" --split="validation" --feature="title" --overwrite_previous - - -python3 run_data_measurements.py --dataset="squad_v2" --config="squad_v2" --split="train" --feature="context" --overwrite_previous -python3 run_data_measurements.py --dataset="squad_v2" --config="squad_v2" --split="train" --feature="question" 
--overwrite_previous -python3 run_data_measurements.py --dataset="squad_v2" --config="squad_v2" --split="train" --feature="title" --overwrite_previous -python3 run_data_measurements.py --dataset="squad_v2" --config="squad_v2" --split="validation" --feature="context" --overwrite_previous -python3 run_data_measurements.py --dataset="squad_v2" --config="squad_v2" --split="validation" --feature="question" --overwrite_previous -python3 run_data_measurements.py --dataset="squad_v2" --config="squad_v2" --split="validation" --feature="title" --overwrite_previous diff --git a/spaces/ICML2022/OFA/fairseq/examples/megatron_11b/README.md b/spaces/ICML2022/OFA/fairseq/examples/megatron_11b/README.md deleted file mode 100644 index 945c96c91e2e2d93466abc28d90bc25a1e7dd471..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/megatron_11b/README.md +++ /dev/null @@ -1,161 +0,0 @@ -# Megatron-11b - -Megatron-11b is a unidirectional language model with `11B` parameters based on [Megatron-LM](https://arxiv.org/pdf/1909.08053.pdf). Following the original Megatron work, we trained the model using intra-layer model parallelism with each layer's parameters split across 8 GPUs. - -Megatron-11b is trained on the same data and uses the same byte-pair encoding (BPE) as [RoBERTa](https://arxiv.org/pdf/1907.11692.pdf). - -## Pre-trained models - -Model | Description | # params | # filesize | Download ----|---|---|---|--- -`megatron_11b` | megatron_11b unidirectional language model | 11B | 19Gb | [megatron_11b.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/model_parallel/megatron_11b.tar.gz) - -#### Architecture: - -Param | Value ----|--- -embed_dim | 3072 -ffn_dim | 3072 * 6 -layers | 72 -attention heads | 32 - -#### Training details: - -Param | value ----|--- -bsz | 512 -num_updates | 300,000 -peak_lr | 1.5e-04 -lr scheduler | inverse_sqrt -clip norm | 0.0 - - -## Example training command (model parallel) - -Megatron-11b contains too many parameters to train on a single GPU. Following -the original Megatron work, we adopt an intra-layer model parallel training -approach in which each layer's parameters are split across multiple GPUs and -activations and gradients are communicated during the forward/backward pass, -respectively. We similarly split the loss computation using the -`vocab_parallel_cross_entropy` criterion. - -The following training command illustrates how to do model parallel training in -fairseq. We assume that each machine (node) has 8 GPUs among which to split the -model parameters (`--model-parallel-size 8`). If you have access to multiple -nodes, you may combine this with data parallel training by increasing -`--distributed-world-size`. - -To train Megatron-11b on a single node: - - -```bash -fairseq-train \ - --distributed-world-size 8 \ - --memory-efficient-fp16 \ - --num-workers 2 \ - --model-parallel-size 8 \ - --criterion vocab_parallel_cross_entropy \ - --task language_modeling \ - --sample-break-mode none \ - --tokens-per-sample 1024 \ - --arch transformer_lm_megatron_11b \ - --share-decoder-input-output-embed \ - --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-08 --clip-norm 0.0 \ - --lr-scheduler inverse_sqrt --lr 0.00015 \ - --warmup-updates 3000 --weight-decay 0.01 \ - --dropout 0.1 --attention-dropout 0.1 \ - --batch-size 2 \ - --max-update 300000; -``` - -Note: Above was tested on `DGX-1` box, with `8xV100-32Gb` GPUs. 
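-
-To combine model parallelism with data parallel training across nodes, only `--distributed-world-size` needs to grow. Below is a sketch assuming two 8-GPU nodes (the per-node rank and rendezvous settings from fairseq's standard distributed launch are omitted, and every other flag is unchanged from the single-node command above):
-
-```bash
-# Hypothetical 2-node run: 16 GPUs total, each model replica still split across 8 GPUs.
-fairseq-train \
-    --distributed-world-size 16 \
-    --memory-efficient-fp16 \
-    --num-workers 2 \
-    --model-parallel-size 8 \
-    --criterion vocab_parallel_cross_entropy \
-    --task language_modeling \
-    --sample-break-mode none \
-    --tokens-per-sample 1024 \
-    --arch transformer_lm_megatron_11b \
-    --share-decoder-input-output-embed \
-    --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-08 --clip-norm 0.0 \
-    --lr-scheduler inverse_sqrt --lr 0.00015 \
-    --warmup-updates 3000 --weight-decay 0.01 \
-    --dropout 0.1 --attention-dropout 0.1 \
-    --batch-size 2 \
-    --max-update 300000;
-```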
- -## Results - -**[Wikitext103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)** - -Model | Valid perplexity | Test perplexity ----|---|--- -`megatron_11b` | 10.64 | 10.54 - - -## Evaluating `megatron_11b` on Wikitext-103 - -#### 1. Downloading Megatron-11b -```bash -# WARNING: this file is 19GB -wget https://dl.fbaipublicfiles.com/fairseq/models/model_parallel/megatron_11b.tar.gz -tar -xzvf megatron_11b.tar.gz -``` - -#### 2. Download Wikitext-103 -```bash -wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip -unzip wikitext-103-raw-v1.zip -``` - -#### 3. Detokenize test tokens -Megatron-11b uses a byte-level BPE that expects raw (untokenized) input. Since -the wikitext-103 dataset comes tokenized, we apply a simple detokenization -process to restore the untokenized test set: - -```bash -python -m examples.megatron_11b.detok wikitext-103-raw/wiki.test.raw > wikitext-103-raw/wiki.test.detok -``` - -#### 4. BPE encoding -```bash -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' - -python -m examples.roberta.multiprocessing_bpe_encoder \ - --encoder-json encoder.json \ - --vocab-bpe vocab.bpe \ - --inputs "wikitext-103-raw/wiki.test.detok" \ - --outputs "wikitext-103-raw/wiki.test.bpe" \ - --workers 60; -``` - -#### 5. Fairseq binarize -```bash -fairseq-preprocess \ - --only-source \ - --testpref wikitext-103-raw/wiki.test.bpe \ - --srcdict megatron_11b/dict.txt \ - --destdir wikitext103-bin; -``` - -#### 6. Evaluating perplexity. -We can now evaluate perplexity on the test set. Note that because we've modified -the test set (via detokenization and BPE), the perplexity reported by -`fairseq-eval-lm` needs to be renormalized. - -Compute unnormalized perplexity: - -```bash -DATA_PATH=wikitext103-bin/ -fairseq-eval-lm \ - $DATA_PATH \ - --path megatron_11b/model.pt \ - --task language_modeling \ - --gen-subset test \ - --batch-size 8 \ - --criterion cross_entropy \ - --context-window 992 \ - --distributed-world-size 8 \ - --model-parallel-size 8; -# Expected PPL (unnormalized_ppl): [8.46] -# Note: the eval command needs to run on 8 GPUs for the released model -``` -Renormalizing formula: `2 ^ ( log_2(unnormalized_PPL) * (270847 / 245566))`. -PPL After normalization: `10.54` - -To renormalize the perplexity, we must account for the change in token count -after detokenizing and appling BPE. The formula for this is: -`2 ^ ( log_2(unnormalized_PPL) * (new_token_cnt / orig_token_cnt))` - -For the wikitext-103 test set, the original token count is `245566` and the -token count after detokenization and applying BPE is `270847`. 
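The same arithmetic in a few lines of Python, using the token counts quoted above, reproduces the figure stated just below:

```python
import math

unnormalized_ppl = 8.46   # PPL reported by fairseq-eval-lm on the BPE'd test set
orig_tokens = 245566      # original wikitext-103 test token count
new_tokens = 270847       # token count after detokenization and BPE

renormalized = 2 ** (math.log2(unnormalized_ppl) * (new_tokens / orig_tokens))
print(f"{renormalized:.2f}")  # -> 10.54
```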
- -The perplexity after renormalization is: -`2 ^ ( log_2(8.46) * (270847 / 245566)) = 10.54` diff --git a/spaces/Illia56/book-mind-ai/app.py b/spaces/Illia56/book-mind-ai/app.py deleted file mode 100644 index fca710eb0abd25842ed09b0621c9425abed4fd0d..0000000000000000000000000000000000000000 --- a/spaces/Illia56/book-mind-ai/app.py +++ /dev/null @@ -1,115 +0,0 @@ -import gradio as gr -import json -import markdown -from bardapi import Bard -from telegraph import Telegraph -import time - -# Set up the Telegraph client -telegraph = Telegraph() -telegraph.create_account(short_name='BookMindAI') - -with open('detail_queries.json', 'r') as file: - detail_queries = json.load(file) -with open('lang.json', 'r') as file: - languages = [str(x) for x in json.load(file).keys()] - -def markdown_to_html(md_content): - return markdown.markdown(md_content) - -def is_link_image(link): - image_types = ["png", "jpg", "jpeg", "gif", "webp", "svg", "bmp", "tiff"] - return any(ext in link for ext in image_types) - -def fetch_summary(bard, book_name, author): - question = f"Provide a short summary of the book '{book_name}' by {author}." - bard_answer = bard.get_answer(question) - return bard_answer - -def fetch_book_cover(bard, book_name, author, language): - query = f"Find me 10 covers image of the book '{book_name}' by {author} in {language} language." - response = bard.get_answer(query) - return [link for link in response["links"] if is_link_image(link)][:2] - -def post_to_telegraph(title, content): - html_content = markdown_to_html(content) - response = telegraph.create_page( - title=title, - html_content=html_content - ) - return 'https://telegra.ph/{}'.format(response['path']) - -def generate_predictions(token, book_name, author, language_choice, detail_options=[]): - bard = Bard(token=token, language=language_choice[3:].lower()) - image_links = fetch_book_cover(bard, book_name, author, language_choice) - - details = "" - for option in detail_options: - query_template = detail_queries.get(option).format(book_name=book_name, author=author) - try: - response = bard.get_answer(query_template) - details += f"\n\n**{option}**:\n{response['content']}" - except: - time.sleep(20) - try: - response = bard.get_answer(query_template) - details += f"\n\n**{option}**:\n{response['content']}" - except: - pass - - summary = fetch_summary(bard, book_name, author) - combined_summary = summary["content"] + details - try: - telegraph_url = post_to_telegraph(f"Summary of {book_name} by {author}", combined_summary) - except requests.exceptions.ConnectionError: - telegraph_url = "Error connecting to Telegraph API" - - return image_links, combined_summary, telegraph_url - -with gr.Blocks(title="📚 BookMindAI", theme=gr.themes.Base()).queue() as demo: - gr.DuplicateButton() - with gr.Tab("Summarize book🎯"): - with gr.Row(): - with gr.Column(): - token_input_summarize = gr.Textbox(placeholder="Enter Bard API Token", label="Bard API Token") - book_name_input = gr.Textbox(placeholder="Enter Book Name", label="Book Name") - author_name_input = gr.Textbox(placeholder="Enter Author Name", label="Author Name") - language_input = gr.Dropdown(choices=languages, label="Language") - detail_options_input = gr.CheckboxGroup(choices=list(detail_queries.keys()), label="Details to Include", visible=True) - run_button_summarize = gr.Button(label="Run", visible=True) - - with gr.Column(): - book_cover_output = gr.Gallery(label="Book Cover", visible=True) - telegraph_link_output = gr.Markdown(label="View on Telegraph", visible=True) - with gr.Row(): 
- summary_output = gr.Markdown(label="Parsed Content", visible=True) - - run_button_summarize.click(fn=generate_predictions, - inputs=[token_input_summarize, book_name_input, author_name_input, language_input, detail_options_input], - outputs=[book_cover_output, summary_output, telegraph_link_output], - show_progress=True, queue=True) - - examples_summarize = [ - ["YOUR_BARD_API_TOKEN", "Harry Potter and the Philosopher's Stone", "J.K. Rowling", "🇬🇧 english"], - ["YOUR_BARD_API_TOKEN", "Pride and Prejudice", "Jane Austen", "🇺🇦 ukrainian"], - ["YOUR_BARD_API_TOKEN", "The Great Gatsby", "F. Scott Fitzgerald", "🇫🇷 french"] - ] - gr.Examples(examples=examples_summarize, inputs=[token_input_summarize, book_name_input, author_name_input, language_input, detail_options_input]) - - with gr.Tab("Talk about book🎓"): - chat_examples = [ - "How do the underlying themes of a book reflect the societal values and beliefs of its time?", - "In what ways do the characters' personal journeys mirror the broader human experience?", - "How does the author's use of symbolism and allegory provide insight into the deeper truths of our existence?", - "To what extent does the narrative structure of the book challenge or reinforce our understanding of reality?" - ] - - def chat_response(message, history): - bard = Bard(token=token_input_chat.value, language="en") # Assuming the language is English, modify it accordingly - for i in range(len(message)): - response = bard.get_answer(message) - yield response['content'] - - chat_interface = gr.ChatInterface(chat_response, examples=chat_examples, title='Talk with Palm 2 about any book.') - -demo.launch() \ No newline at end of file diff --git a/spaces/Illumotion/Koboldcpp/examples/quantize/quantize.cpp b/spaces/Illumotion/Koboldcpp/examples/quantize/quantize.cpp deleted file mode 100644 index c7dd0d894634cbb242a5dcdbb2fbe45cbe8f104e..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/examples/quantize/quantize.cpp +++ /dev/null @@ -1,201 +0,0 @@ -#include "build-info.h" -#include "common.h" -#include "llama.h" - -#include -#include -#include -#include - -struct quant_option { - std::string name; - llama_ftype ftype; - std::string desc; -}; - -static const std::vector QUANT_OPTIONS = { - { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 3.56G, +0.2166 ppl @ LLaMA-v1-7B", }, - { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 3.90G, +0.1585 ppl @ LLaMA-v1-7B", }, - { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 4.33G, +0.0683 ppl @ LLaMA-v1-7B", }, - { "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 4.70G, +0.0349 ppl @ LLaMA-v1-7B", }, -#ifdef GGML_USE_K_QUANTS - { "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.63G, +0.6717 ppl @ LLaMA-v1-7B", }, - { "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" }, - { "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 2.75G, +0.5551 ppl @ LLaMA-v1-7B", }, - { "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.07G, +0.2496 ppl @ LLaMA-v1-7B", }, - { "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 3.35G, +0.1764 ppl @ LLaMA-v1-7B", }, - { "Q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", }, - { "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 3.59G, +0.0992 ppl @ LLaMA-v1-7B", }, - { "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 3.80G, +0.0532 ppl @ LLaMA-v1-7B", }, - { "Q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M, "alias for Q5_K_M", }, - { "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 4.33G, +0.0400 ppl @ LLaMA-v1-7B", }, - { "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0122 ppl @ LLaMA-v1-7B", }, - { "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, -0.0008 ppl @ LLaMA-v1-7B", }, -#endif - { "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 
6.70G, +0.0004 ppl @ LLaMA-v1-7B", }, - { "F16", LLAMA_FTYPE_MOSTLY_F16, "13.00G @ 7B", }, - { "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", }, - // Note: Ensure COPY comes after F32 to avoid ftype 0 from matching. - { "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", }, -}; - - -static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std::string & ftype_str_out) { - std::string ftype_str; - - for (auto ch : ftype_str_in) { - ftype_str.push_back(std::toupper(ch)); - } - for (auto & it : QUANT_OPTIONS) { - if (it.name == ftype_str) { - ftype = it.ftype; - ftype_str_out = it.name; - return true; - } - } - try { - int ftype_int = std::stoi(ftype_str); - for (auto & it : QUANT_OPTIONS) { - if (it.ftype == ftype_int) { - ftype = it.ftype; - ftype_str_out = it.name; - return true; - } - } - } - catch (...) { - // stoi failed - } - return false; -} - -// usage: -// ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads] -// -[[noreturn]] -static void usage(const char * executable) { - printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable); - printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n"); - printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n"); - printf("\nAllowed quantization types:\n"); - for (auto & it : QUANT_OPTIONS) { - if (it.name != "COPY") { - printf(" %2d or ", it.ftype); - } else { - printf(" "); - } - printf("%-6s : %s\n", it.name.c_str(), it.desc.c_str()); - } - exit(1); -} - -int main(int argc, char ** argv) { - if (argc < 3) { - usage(argv[0]); - } - - llama_model_quantize_params params = llama_model_quantize_default_params(); - - int arg_idx = 1; - - for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) { - if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) { - params.quantize_output_tensor = false; - } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) { - params.allow_requantize = true; - } else { - usage(argv[0]); - } - } - - if (argc - arg_idx < 2) { - usage(argv[0]); - } - - llama_backend_init(false); - - // parse command line arguments - const std::string fname_inp = argv[arg_idx]; - arg_idx++; - std::string fname_out; - - std::string ftype_str; - if (try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) { - std::string fpath; - const size_t pos = fname_inp.find_last_of("/\\"); - if (pos != std::string::npos) { - fpath = fname_inp.substr(0, pos + 1); - } - // export as [inp path]/ggml-model-[ftype].gguf - fname_out = fpath + "ggml-model-" + ftype_str + ".gguf"; - arg_idx++; - if (ftype_str == "COPY") { - params.only_copy = true; - } - } - else { - fname_out = argv[arg_idx]; - arg_idx++; - - if (argc <= arg_idx) { - fprintf(stderr, "%s: missing ftype\n", __func__); - return 1; - } - if (!try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) { - fprintf(stderr, "%s: invalid ftype '%s'\n", __func__, argv[3]); - return 1; - } - if (ftype_str == "COPY") { - params.only_copy = true; - } - arg_idx++; - } - - // parse nthreads - if (argc > arg_idx) { - try { - params.nthread = std::stoi(argv[arg_idx]); - } - catch (const std::exception & e) { - fprintf(stderr, "%s: invalid nthread '%s' (%s)\n", __func__, 
argv[arg_idx], e.what()); - return 1; - } - } - - print_build_info(); - - fprintf(stderr, "%s: quantizing '%s' to '%s' as %s", __func__, fname_inp.c_str(), fname_out.c_str(), ftype_str.c_str()); - if (params.nthread > 0) { - fprintf(stderr, " using %d threads", params.nthread); - } - fprintf(stderr, "\n"); - - const int64_t t_main_start_us = llama_time_us(); - - int64_t t_quantize_us = 0; - - // load the model - { - const int64_t t_start_us = llama_time_us(); - - if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), ¶ms)) { - fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); - return 1; - } - - t_quantize_us = llama_time_us() - t_start_us; - } - - // report timing - { - const int64_t t_main_end_us = llama_time_us(); - - printf("\n"); - printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0); - printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0); - } - - llama_backend_free(); - - return 0; -} diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/facelib/parsing/__init__.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/facelib/parsing/__init__.py deleted file mode 100644 index 72656e4b5f61df8cd0838588b0c6488fcc886e16..0000000000000000000000000000000000000000 --- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/facelib/parsing/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -import torch - -from facelib.utils import load_file_from_url -from .bisenet import BiSeNet -from .parsenet import ParseNet - - -def init_parsing_model(model_name='bisenet', half=False, device='cuda'): - if model_name == 'bisenet': - model = BiSeNet(num_class=19) - model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_bisenet.pth' - elif model_name == 'parsenet': - model = ParseNet(in_size=512, out_size=512, parsing_ch=19) - model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth' - else: - raise NotImplementedError(f'{model_name} is not implemented.') - - model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None) - load_net = torch.load(model_path, map_location=lambda storage, loc: storage) - model.load_state_dict(load_net, strict=True) - model.eval() - model = model.to(device) - return model diff --git a/spaces/JohnC26/7-NER-Biomed-ClinicalTerms/backup.app.py b/spaces/JohnC26/7-NER-Biomed-ClinicalTerms/backup.app.py deleted file mode 100644 index fd97bf2a8592b219ba1c2d4c94187d984e63d114..0000000000000000000000000000000000000000 --- a/spaces/JohnC26/7-NER-Biomed-ClinicalTerms/backup.app.py +++ /dev/null @@ -1,268 +0,0 @@ -import gradio as gr -import pandas as pd -import json -from collections import defaultdict - -# Create tokenizer for biomed model -from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification -tokenizer = AutoTokenizer.from_pretrained("d4data/biomedical-ner-all") # https://huggingface.co/d4data/biomedical-ner-all?text=asthma -model = AutoModelForTokenClassification.from_pretrained("d4data/biomedical-ner-all") -pipe = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") - -# Matplotlib for entity graph -import matplotlib.pyplot as plt -plt.switch_backend("Agg") - -# Load examples from JSON -import os - -# Load terminology datasets: -basedir = os.path.dirname(__file__) -#dataLOINC = pd.read_csv(basedir + "\\" + f'LoincTableCore.csv') -#dataPanels = pd.read_csv(basedir + "\\" + f'PanelsAndForms-ACW1208Labeled.csv') -#dataSNOMED = pd.read_csv(basedir + "\\" + 
f'sct2_TextDefinition_Full-en_US1000124_20220901.txt',sep='\t') -#dataOMS = pd.read_csv(basedir + "\\" + f'SnomedOMS.csv') -#dataICD10 = pd.read_csv(basedir + "\\" + f'ICD10Diagnosis.csv') - -dataLOINC = pd.read_csv(f'LoincTableCore.csv') -dataPanels = pd.read_csv(f'PanelsAndForms-ACW1208Labeled.csv') -dataSNOMED = pd.read_csv(f'sct2_TextDefinition_Full-en_US1000124_20220901.txt',sep='\t') -dataOMS = pd.read_csv(f'SnomedOMS.csv') -dataICD10 = pd.read_csv(f'ICD10Diagnosis.csv') - -dir_path = os.path.dirname(os.path.realpath(__file__)) -EXAMPLES = {} -#with open(dir_path + "\\" + "examples.json", "r") as f: -with open("examples.json", "r") as f: - example_json = json.load(f) - EXAMPLES = {x["text"]: x["label"] for x in example_json} - -def MatchLOINC(name): - #basedir = os.path.dirname(__file__) - pd.set_option("display.max_rows", None) - #data = pd.read_csv(basedir + "\\" + f'LoincTableCore.csv') - data = dataLOINC - swith=data.loc[data['COMPONENT'].str.contains(name, case=False, na=False)] - return swith - -def MatchLOINCPanelsandForms(name): - #basedir = os.path.dirname(__file__) - #data = pd.read_csv(basedir + "\\" + f'PanelsAndForms-ACW1208Labeled.csv') - data = dataPanels - # Assessment Name: - #swith=data.loc[data['ParentName'].str.contains(name, case=False, na=False)] - # Assessment Question: - swith=data.loc[data['LoincName'].str.contains(name, case=False, na=False)] - return swith - -def MatchSNOMED(name): - #basedir = os.path.dirname(__file__) - #data = pd.read_csv(basedir + "\\" + f'sct2_TextDefinition_Full-en_US1000124_20220901.txt',sep='\t') - data = dataSNOMED - swith=data.loc[data['term'].str.contains(name, case=False, na=False)] - return swith - -def MatchOMS(name): - #basedir = os.path.dirname(__file__) - #data = pd.read_csv(basedir + "\\" + f'SnomedOMS.csv') - data = dataOMS - swith=data.loc[data['SNOMED CT'].str.contains(name, case=False, na=False)] - return swith - -def MatchICD10(name): - #basedir = os.path.dirname(__file__) - #data = pd.read_csv(basedir + "\\" + f'ICD10Diagnosis.csv') - data = dataICD10 - swith=data.loc[data['Description'].str.contains(name, case=False, na=False)] - return swith - -def SaveResult(text, outputfileName): - #try: - basedir = os.path.dirname(__file__) - savePath = outputfileName - print("Saving: " + text + " to " + savePath) - from os.path import exists - file_exists = exists(savePath) - if file_exists: - with open(outputfileName, "a") as f: #append - #for line in text: - f.write(str(text.replace("\n"," "))) - f.write('\n') - else: - with open(outputfileName, "w") as f: #write - #for line in text: - f.write(str(text.replace("\n"," "))) - f.write('\n') - #except ValueError as err: - # raise ValueError("File Save Error in SaveResult \n" + format_tb(err.__traceback__)[0] + err.args[0] + "\nEnd of error message.") from None - - return - -def loadFile(filename): - try: - basedir = os.path.dirname(__file__) - loadPath = basedir + "\\" + filename - - print("Loading: " + loadPath) - - from os.path import exists - file_exists = exists(loadPath) - - if file_exists: - with open(loadPath, "r") as f: #read - contents = f.read() - print(contents) - return contents - - except ValueError as err: - raise ValueError("File Save Error in SaveResult \n" + format_tb(err.__traceback__)[0] + err.args[0] + "\nEnd of error message.") from None - - return "" - -def get_today_filename(): - from datetime import datetime - date = datetime.now().strftime("%Y_%m_%d-%I.%M.%S.%p") - #print(f"filename_{date}") 'filename_2023_01_12-03-29-22_AM' - return f"MedNER_{date}.csv" 
- -def get_base(filename): - basedir = os.path.dirname(__file__) - loadPath = basedir + "\\" + filename - #print("Loading: " + loadPath) - return loadPath - -def group_by_entity(raw): - outputFile = get_base(get_today_filename()) - out = defaultdict(int) - - for ent in raw: - out[ent["entity_group"]] += 1 - myEntityGroup = ent["entity_group"] - print("Found entity group type: " + myEntityGroup) - - if (myEntityGroup in ['Sign_symptom', 'Detailed_description', 'History', 'Activity', 'Medication' ]): - eterm = ent["word"].replace('#','') - minlength = 3 - if len(eterm) > minlength: - print("Found eterm: " + eterm) - eterm.replace("#","") - g1=MatchLOINC(eterm) - g2=MatchLOINCPanelsandForms(eterm) - g3=MatchSNOMED(eterm) - g4=MatchOMS(eterm) - g5=MatchICD10(eterm) - sAll = "" - - print("Saving to output file " + outputFile) - # Create harmonisation output format of input to output code, name, Text - - try: # 18 fields, output to labeled CSV dataset for results teaching on scored regret changes to action plan with data inputs - col = " 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19" - - #LOINC - g11 = g1['LOINC_NUM'].to_string().replace(","," ").replace("\n"," ") - g12 = g1['COMPONENT'].to_string().replace(","," ").replace("\n"," ") - s1 = ("LOINC," + myEntityGroup + "," + eterm + ",questions of ," + g12 + "," + g11 + ", Label,Value, Label,Value, Label,Value ") - if g11 != 'Series([] )': SaveResult(s1, outputFile) - - #LOINC Panels - g21 = g2['Loinc'].to_string().replace(","," ").replace("\n"," ") - g22 = g2['LoincName'].to_string().replace(","," ").replace("\n"," ") - g23 = g2['ParentLoinc'].to_string().replace(","," ").replace("\n"," ") - g24 = g2['ParentName'].to_string().replace(","," ").replace("\n"," ") - # s2 = ("LOINC Panel," + myEntityGroup + "," + eterm + ",name of ," + g22 + "," + g21 + ", and Parent codes of ," + g23 + ", with Parent names of ," + g24 + ", Label,Value ") - s2 = ("LOINC Panel," + myEntityGroup + "," + eterm + ",name of ," + g22 + "," + g21 + "," + g24 + ", and Parent codes of ," + g23 + "," + ", Label,Value ") - if g21 != 'Series([] )': SaveResult(s2, outputFile) - - #SNOMED - g31 = g3['conceptId'].to_string().replace(","," ").replace("\n"," ").replace("\l"," ").replace("\r"," ") - g32 = g3['term'].to_string().replace(","," ").replace("\n"," ").replace("\l"," ").replace("\r"," ") - s3 = ("SNOMED Concept," + myEntityGroup + "," + eterm + ",terms of ," + g32 + "," + g31 + ", Label,Value, Label,Value, Label,Value ") - if g31 != 'Series([] )': SaveResult(s3, outputFile) - - #OMS - g41 = g4['Omaha Code'].to_string().replace(","," ").replace("\n"," ") - g42 = g4['SNOMED CT concept ID'].to_string().replace(","," ").replace("\n"," ") - g43 = g4['SNOMED CT'].to_string().replace(","," ").replace("\n"," ") - g44 = g4['PR'].to_string().replace(","," ").replace("\n"," ") - g45 = g4['S&S'].to_string().replace(","," ").replace("\n"," ") - s4 = ("OMS," + myEntityGroup + "," + eterm + ",concepts of ," + g44 + "," + g45 + ", and SNOMED codes of ," + g43 + ", and OMS problem of ," + g42 + ", and OMS Sign Symptom of ," + g41) - if g41 != 'Series([] )': SaveResult(s4, outputFile) - - #ICD10 - g51 = g5['Code'].to_string().replace(","," ").replace("\n"," ") - g52 = g5['Description'].to_string().replace(","," ").replace("\n"," ") - s5 = ("ICD10," + myEntityGroup + "," + eterm + ",descriptions of ," + g52 + "," + g51 + ", Label,Value, Label,Value, Label,Value ") - if g51 != 'Series([] )': SaveResult(s5, outputFile) - - except ValueError as err: - raise ValueError("Error in group by entity 
\n" + format_tb(err.__traceback__)[0] + err.args[0] + "\nEnd of error message.") from None - - return outputFile - - -def plot_to_figure(grouped): - fig = plt.figure() - plt.bar(x=list(grouped.keys()), height=list(grouped.values())) - plt.margins(0.2) - plt.subplots_adjust(bottom=0.4) - plt.xticks(rotation=90) - return fig - - -def ner(text): - raw = pipe(text) - ner_content = { - "text": text, - "entities": [ - { - "entity": x["entity_group"], - "word": x["word"], - "score": x["score"], - "start": x["start"], - "end": x["end"], - } - for x in raw - ], - } - - outputFile = group_by_entity(raw) - label = EXAMPLES.get(text, "Unknown") - outputDataframe = pd.read_csv(outputFile) - return (ner_content, outputDataframe, outputFile) - -demo = gr.Blocks() -with demo: - gr.Markdown( - """ - # 🩺⚕️NLP Clinical Ontology Biomedical NER - """ - ) - input = gr.Textbox(label="Note text", value="") - - with gr.Tab("Biomedical Entity Recognition"): - output=[ - gr.HighlightedText(label="NER", combine_adjacent=True), - #gr.JSON(label="Entity Counts"), - #gr.Label(label="Rating"), - #gr.Plot(label="Bar"), - gr.Dataframe(label="Dataframe"), - gr.File(label="File"), - ] - examples=list(EXAMPLES.keys()) - gr.Examples(examples, inputs=input) - input.change(fn=ner, inputs=input, outputs=output) - - with gr.Tab("Clinical Terminology Resolution"): - with gr.Row(variant="compact"): - btnLOINC = gr.Button("LOINC") - btnPanels = gr.Button("Panels") - btnSNOMED = gr.Button("SNOMED") - btnOMS = gr.Button("OMS") - btnICD10 = gr.Button("ICD10") - - examples=list(EXAMPLES.keys()) - gr.Examples(examples, inputs=input) - input.change(fn=ner, inputs=input, outputs=output) -#layout="vertical" -demo.launch(debug=True) diff --git a/spaces/Josiah-Adesola/Text-Summarizer-Bart/README.md b/spaces/Josiah-Adesola/Text-Summarizer-Bart/README.md deleted file mode 100644 index db7a42bcff23663328aba37e2b7e3a8c4ce57a0c..0000000000000000000000000000000000000000 --- a/spaces/Josiah-Adesola/Text-Summarizer-Bart/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text Summarizer Bart -emoji: 👁 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KNDLR/trash-ai/model.py b/spaces/KNDLR/trash-ai/model.py deleted file mode 100644 index c285b07a10b9bde9e663d6d6d031209abe65f36a..0000000000000000000000000000000000000000 --- a/spaces/KNDLR/trash-ai/model.py +++ /dev/null @@ -1,82 +0,0 @@ -import torch -import torchvision.transforms as transforms -import torchvision.models as models -from pytorch_grad_cam import GradCAM -from pytorch_grad_cam.utils.image import show_cam_on_image -from PIL import Image -import numpy as np -import time - -class Prediction: - def __init__(self, data, heatmap, duration): - self.data = data - self.heatmap = heatmap - self.duration = duration - -class Pipeline: - def __init__(self): - self.classes = ['cardboard', 'glass', 'metal', 'paper', 'plastic', 'trash'] - self.transformations = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()]) - - if torch.cuda.is_available(): - self.device = torch.device('cuda') - else: - self.device = torch.device('cpu') - - self.model = {} - - resnet50_0810 = self.to_device(ResNet50(self.classes), self.device) - resnet50_0810.load_state_dict(torch.load('models/resnet50_0810.pt', map_location=self.device)) - resnet50_0810.eval() - resnet50_0810.cam = GradCAM(resnet50_0810.network, 
[resnet50_0810.network.layer4], torch.cuda.is_available()) - self.model["resnet50_0810"] = resnet50_0810 - - resnet152_0813 = self.to_device(ResNet152(self.classes), self.device) - resnet152_0813.load_state_dict(torch.load('models/resnet152_0813.pt', map_location=self.device)) - resnet152_0813.eval() - resnet152_0813.cam = GradCAM(resnet152_0813.network, [resnet152_0813.network.layer4], torch.cuda.is_available()) - self.model["resnet152_0813"] = resnet152_0813 - - resnet152_0902 = self.to_device(ResNet152(self.classes), self.device) - resnet152_0902.load_state_dict(torch.load('models/resnet152_0902.pt', map_location=self.device)) - resnet152_0902.eval() - resnet152_0902.cam = GradCAM(resnet152_0902.network, [resnet152_0902.network.layer4], torch.cuda.is_available()) - self.model["resnet152_0902"] = resnet152_0902 - - def to_device(self, data, device): - return data.to(device, torch.float32) - - def predict_image(self, model, image): - tensor = self.transformations(image) - xb = self.to_device(tensor.unsqueeze(0), self.device) - start_time = time.time() - yb = self.model[model](xb) - end_time = time.time() - data = {self.classes[i]: float(yb[0][i]) for i in range(len(self.classes))} - return Prediction(data, self.visualize(model, image, xb), int((end_time - start_time) * 1000)) - - def visualize(self, model, rgb_image, input_tensor): - rgb_image = rgb_image.resize((256, 256)) - rgb_image = np.array(rgb_image) - rgb_image = np.float32(rgb_image) / 255 - greyscale_cam = self.model[model].cam(input_tensor)[0, :] - image = show_cam_on_image(rgb_image, greyscale_cam, use_rgb=True) - return Image.fromarray(image) - -class ResNet50(torch.nn.Module): - def __init__(self, classes): - super().__init__() - self.network = models.resnet50(weights="DEFAULT") - self.network.fc = torch.nn.Linear(self.network.fc.in_features, len(classes)) - - def forward(self, xb): - return torch.sigmoid(self.network(xb)) - -class ResNet152(torch.nn.Module): - def __init__(self, classes): - super().__init__() - self.network = models.resnet152(weights="DEFAULT") - self.network.fc = torch.nn.Linear(self.network.fc.in_features, len(classes)) - - def forward(self, xb): - return torch.sigmoid(self.network(xb)) \ No newline at end of file diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer/modules/ipex/hijacks.py b/spaces/Kangarroar/ApplioRVC-Inference/infer/modules/ipex/hijacks.py deleted file mode 100644 index b06f3a9c1a70ef515c30d0e7d749923ecb8d0bfe..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/infer/modules/ipex/hijacks.py +++ /dev/null @@ -1,196 +0,0 @@ -import contextlib -import importlib -import torch -import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import - -# pylint: disable=protected-access, missing-function-docstring, line-too-long, unnecessary-lambda, no-else-return - -class CondFunc: # pylint: disable=missing-class-docstring - def __new__(cls, orig_func, sub_func, cond_func): - self = super(CondFunc, cls).__new__(cls) - if isinstance(orig_func, str): - func_path = orig_func.split('.') - for i in range(len(func_path)-1, -1, -1): - try: - resolved_obj = importlib.import_module('.'.join(func_path[:i])) - break - except ImportError: - pass - for attr_name in func_path[i:-1]: - resolved_obj = getattr(resolved_obj, attr_name) - orig_func = getattr(resolved_obj, func_path[-1]) - setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs)) - self.__init__(orig_func, sub_func, cond_func) - return lambda *args, **kwargs: 
self(*args, **kwargs) - def __init__(self, orig_func, sub_func, cond_func): - self.__orig_func = orig_func - self.__sub_func = sub_func - self.__cond_func = cond_func - def __call__(self, *args, **kwargs): - if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs): - return self.__sub_func(self.__orig_func, *args, **kwargs) - else: - return self.__orig_func(*args, **kwargs) - -_utils = torch.utils.data._utils -def _shutdown_workers(self): - if torch.utils.data._utils is None or torch.utils.data._utils.python_exit_status is True or torch.utils.data._utils.python_exit_status is None: - return - if hasattr(self, "_shutdown") and not self._shutdown: - self._shutdown = True - try: - if hasattr(self, '_pin_memory_thread'): - self._pin_memory_thread_done_event.set() - self._worker_result_queue.put((None, None)) - self._pin_memory_thread.join() - self._worker_result_queue.cancel_join_thread() - self._worker_result_queue.close() - self._workers_done_event.set() - for worker_id in range(len(self._workers)): - if self._persistent_workers or self._workers_status[worker_id]: - self._mark_worker_as_unavailable(worker_id, shutdown=True) - for w in self._workers: # pylint: disable=invalid-name - w.join(timeout=torch.utils.data._utils.MP_STATUS_CHECK_INTERVAL) - for q in self._index_queues: # pylint: disable=invalid-name - q.cancel_join_thread() - q.close() - finally: - if self._worker_pids_set: - torch.utils.data._utils.signal_handling._remove_worker_pids(id(self)) - self._worker_pids_set = False - for w in self._workers: # pylint: disable=invalid-name - if w.is_alive(): - w.terminate() - -class DummyDataParallel(torch.nn.Module): # pylint: disable=missing-class-docstring, unused-argument, too-few-public-methods - def __new__(cls, module, device_ids=None, output_device=None, dim=0): # pylint: disable=unused-argument - if isinstance(device_ids, list) and len(device_ids) > 1: - print("IPEX backend doesn't support DataParallel on multiple XPU devices") - return module.to("xpu") - -def return_null_context(*args, **kwargs): # pylint: disable=unused-argument - return contextlib.nullcontext() - -def check_device(device): - return bool((isinstance(device, torch.device) and device.type == "cuda") or (isinstance(device, str) and "cuda" in device) or isinstance(device, int)) - -def return_xpu(device): - return f"xpu:{device[-1]}" if isinstance(device, str) and ":" in device else f"xpu:{device}" if isinstance(device, int) else torch.device("xpu") if isinstance(device, torch.device) else "xpu" - -def ipex_no_cuda(orig_func, *args, **kwargs): - torch.cuda.is_available = lambda: False - orig_func(*args, **kwargs) - torch.cuda.is_available = torch.xpu.is_available - -original_autocast = torch.autocast -def ipex_autocast(*args, **kwargs): - if len(args) > 0 and args[0] == "cuda": - return original_autocast("xpu", *args[1:], **kwargs) - else: - return original_autocast(*args, **kwargs) - -original_torch_cat = torch.cat -def torch_cat(tensor, *args, **kwargs): - if len(tensor) == 3 and (tensor[0].dtype != tensor[1].dtype or tensor[2].dtype != tensor[1].dtype): - return original_torch_cat([tensor[0].to(tensor[1].dtype), tensor[1], tensor[2].to(tensor[1].dtype)], *args, **kwargs) - else: - return original_torch_cat(tensor, *args, **kwargs) - -original_interpolate = torch.nn.functional.interpolate -def interpolate(tensor, size=None, scale_factor=None, mode='nearest', align_corners=None, recompute_scale_factor=None, antialias=False): # pylint: disable=too-many-arguments - if antialias or align_corners is 
not None: - return_device = tensor.device - return_dtype = tensor.dtype - return original_interpolate(tensor.to("cpu", dtype=torch.float32), size=size, scale_factor=scale_factor, mode=mode, - align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias).to(return_device, dtype=return_dtype) - else: - return original_interpolate(tensor, size=size, scale_factor=scale_factor, mode=mode, - align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias) - -original_linalg_solve = torch.linalg.solve -def linalg_solve(A, B, *args, **kwargs): # pylint: disable=invalid-name - if A.device != torch.device("cpu") or B.device != torch.device("cpu"): - return_device = A.device - return original_linalg_solve(A.to("cpu"), B.to("cpu"), *args, **kwargs).to(return_device) - else: - return original_linalg_solve(A, B, *args, **kwargs) - -def ipex_hijacks(): - CondFunc('torch.Tensor.to', - lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs), - lambda orig_func, self, device=None, *args, **kwargs: check_device(device)) - CondFunc('torch.Tensor.cuda', - lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs), - lambda orig_func, self, device=None, *args, **kwargs: check_device(device)) - CondFunc('torch.empty', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.load', - lambda orig_func, *args, map_location=None, **kwargs: orig_func(*args, return_xpu(map_location), **kwargs), - lambda orig_func, *args, map_location=None, **kwargs: map_location is None or check_device(map_location)) - CondFunc('torch.randn', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.ones', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.zeros', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.tensor', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.linspace', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - - CondFunc('torch.Generator', - lambda orig_func, device=None: torch.xpu.Generator(device), - lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu") - - CondFunc('torch.batch_norm', - lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input, - weight if weight is not None else torch.ones(input.size()[1], device=input.device), - bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs), - lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu")) - CondFunc('torch.instance_norm', - lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input, - weight if weight is not None else 
torch.ones(input.size()[1], device=input.device), - bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs), - lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu")) - - #Functions with dtype errors: - CondFunc('torch.nn.modules.GroupNorm.forward', - lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), - lambda orig_func, self, input: input.dtype != self.weight.data.dtype) - CondFunc('torch.nn.modules.linear.Linear.forward', - lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), - lambda orig_func, self, input: input.dtype != self.weight.data.dtype) - CondFunc('torch.nn.modules.conv.Conv2d.forward', - lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), - lambda orig_func, self, input: input.dtype != self.weight.data.dtype) - CondFunc('torch.nn.functional.layer_norm', - lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: - orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs), - lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: - weight is not None and input.dtype != weight.data.dtype) - - #Diffusers Float64 (ARC GPUs doesn't support double or Float64): - if not torch.xpu.has_fp64_dtype(): - CondFunc('torch.from_numpy', - lambda orig_func, ndarray: orig_func(ndarray.astype('float32')), - lambda orig_func, ndarray: ndarray.dtype == float) - - #Broken functions when torch.cuda.is_available is True: - CondFunc('torch.utils.data.dataloader._BaseDataLoaderIter.__init__', - lambda orig_func, *args, **kwargs: ipex_no_cuda(orig_func, *args, **kwargs), - lambda orig_func, *args, **kwargs: True) - - #Functions that make compile mad with CondFunc: - torch.utils.data.dataloader._MultiProcessingDataLoaderIter._shutdown_workers = _shutdown_workers - torch.nn.DataParallel = DummyDataParallel - torch.autocast = ipex_autocast - torch.cat = torch_cat - torch.linalg.solve = linalg_solve - torch.nn.functional.interpolate = interpolate - torch.backends.cuda.sdp_kernel = return_null_context \ No newline at end of file diff --git a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/layers_123821KB.py b/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/layers_123821KB.py deleted file mode 100644 index b82f06bb4993cd63f076e68d7e24185269b1bc42..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/layers_123821KB.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/Kevin676/AutoGPT/autogpt/config/config.py b/spaces/Kevin676/AutoGPT/autogpt/config/config.py deleted file mode 100644 index 4b53df10e8d2832be7ffb321d9036aec5a47a79d..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/autogpt/config/config.py +++ /dev/null @@ -1,251 +0,0 @@ -"""Configuration class to store the state of bools for different scripts access.""" -import os - -import openai -import yaml -from colorama import Fore -from dotenv import load_dotenv - -from autogpt.config.singleton import Singleton - -load_dotenv(verbose=True) - - -class 
Config(metaclass=Singleton): - """ - Configuration class to store the state of bools for different scripts access. - """ - - def __init__(self) -> None: - """Initialize the Config class""" - self.debug_mode = False - self.continuous_mode = False - self.continuous_limit = 0 - self.speak_mode = False - self.skip_reprompt = False - self.allow_downloads = False - self.skip_news = False - - self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml") - self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") - self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4") - self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000)) - self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000)) - self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192)) - - self.openai_api_key = os.getenv("OPENAI_API_KEY") - self.temperature = float(os.getenv("TEMPERATURE", "1")) - self.use_azure = os.getenv("USE_AZURE") == "True" - self.execute_local_commands = ( - os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True" - ) - self.restrict_to_workspace = ( - os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True" - ) - - if self.use_azure: - self.load_azure_config() - openai.api_type = self.openai_api_type - openai.api_base = self.openai_api_base - openai.api_version = self.openai_api_version - - self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY") - self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID") - self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID") - - self.use_mac_os_tts = False - self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS") - - self.use_brian_tts = False - self.use_brian_tts = os.getenv("USE_BRIAN_TTS") - - self.github_api_key = os.getenv("GITHUB_API_KEY") - self.github_username = os.getenv("GITHUB_USERNAME") - - self.google_api_key = os.getenv("GOOGLE_API_KEY") - self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID") - - self.pinecone_api_key = os.getenv("PINECONE_API_KEY") - self.pinecone_region = os.getenv("PINECONE_ENV") - - self.weaviate_host = os.getenv("WEAVIATE_HOST") - self.weaviate_port = os.getenv("WEAVIATE_PORT") - self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http") - self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None) - self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None) - self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None) - self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH") - self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None) - self.use_weaviate_embedded = ( - os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True" - ) - - # milvus configuration, e.g., localhost:19530. 
- self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530") - self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt") - - self.image_provider = os.getenv("IMAGE_PROVIDER") - self.image_size = int(os.getenv("IMAGE_SIZE", 256)) - self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN") - self.huggingface_image_model = os.getenv( - "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4" - ) - self.huggingface_audio_to_text_model = os.getenv( - "HUGGINGFACE_AUDIO_TO_TEXT_MODEL" - ) - self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860") - self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH") - - # Selenium browser settings - self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome") - self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True" - - # User agent header to use when making HTTP requests - # Some websites might just completely deny request with an error code if - # no user agent was found. - self.user_agent = os.getenv( - "USER_AGENT", - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36" - " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", - ) - - self.redis_host = os.getenv("REDIS_HOST", "localhost") - self.redis_port = os.getenv("REDIS_PORT", "6379") - self.redis_password = os.getenv("REDIS_PASSWORD", "") - self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True" - self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt") - # Note that indexes must be created on db 0 in redis, this is not configurable. - - self.memory_backend = os.getenv("MEMORY_BACKEND", "local") - # Initialize the OpenAI API client - openai.api_key = self.openai_api_key - - def get_azure_deployment_id_for_model(self, model: str) -> str: - """ - Returns the relevant deployment id for the model specified. - - Parameters: - model(str): The model to map to the deployment id. - - Returns: - The matching deployment id if found, otherwise an empty string. - """ - if model == self.fast_llm_model: - return self.azure_model_to_deployment_id_map[ - "fast_llm_model_deployment_id" - ] # type: ignore - elif model == self.smart_llm_model: - return self.azure_model_to_deployment_id_map[ - "smart_llm_model_deployment_id" - ] # type: ignore - elif model == "text-embedding-ada-002": - return self.azure_model_to_deployment_id_map[ - "embedding_model_deployment_id" - ] # type: ignore - else: - return "" - - AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml") - - def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None: - """ - Loads the configuration parameters for Azure hosting from the specified file - path as a yaml file. - - Parameters: - config_file(str): The path to the config yaml file. 
DEFAULT: "../azure.yaml" - - Returns: - None - """ - try: - with open(config_file) as file: - config_params = yaml.load(file, Loader=yaml.FullLoader) - except FileNotFoundError: - config_params = {} - self.openai_api_type = config_params.get("azure_api_type") or "azure" - self.openai_api_base = config_params.get("azure_api_base") or "" - self.openai_api_version = ( - config_params.get("azure_api_version") or "2023-03-15-preview" - ) - self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", []) - - def set_continuous_mode(self, value: bool) -> None: - """Set the continuous mode value.""" - self.continuous_mode = value - - def set_continuous_limit(self, value: int) -> None: - """Set the continuous limit value.""" - self.continuous_limit = value - - def set_speak_mode(self, value: bool) -> None: - """Set the speak mode value.""" - self.speak_mode = value - - def set_fast_llm_model(self, value: str) -> None: - """Set the fast LLM model value.""" - self.fast_llm_model = value - - def set_smart_llm_model(self, value: str) -> None: - """Set the smart LLM model value.""" - self.smart_llm_model = value - - def set_fast_token_limit(self, value: int) -> None: - """Set the fast token limit value.""" - self.fast_token_limit = value - - def set_smart_token_limit(self, value: int) -> None: - """Set the smart token limit value.""" - self.smart_token_limit = value - - def set_browse_chunk_max_length(self, value: int) -> None: - """Set the browse_website command chunk max length value.""" - self.browse_chunk_max_length = value - - def set_openai_api_key(self, value: str) -> None: - """Set the OpenAI API key value.""" - self.openai_api_key = value - - def set_elevenlabs_api_key(self, value: str) -> None: - """Set the ElevenLabs API key value.""" - self.elevenlabs_api_key = value - - def set_elevenlabs_voice_1_id(self, value: str) -> None: - """Set the ElevenLabs Voice 1 ID value.""" - self.elevenlabs_voice_1_id = value - - def set_elevenlabs_voice_2_id(self, value: str) -> None: - """Set the ElevenLabs Voice 2 ID value.""" - self.elevenlabs_voice_2_id = value - - def set_google_api_key(self, value: str) -> None: - """Set the Google API key value.""" - self.google_api_key = value - - def set_custom_search_engine_id(self, value: str) -> None: - """Set the custom search engine id value.""" - self.custom_search_engine_id = value - - def set_pinecone_api_key(self, value: str) -> None: - """Set the Pinecone API key value.""" - self.pinecone_api_key = value - - def set_pinecone_region(self, value: str) -> None: - """Set the Pinecone region value.""" - self.pinecone_region = value - - def set_debug_mode(self, value: bool) -> None: - """Set the debug mode value.""" - self.debug_mode = value - - -def check_openai_api_key() -> None: - """Check if the OpenAI API key is set in config.py or as an environment variable.""" - cfg = Config() - if not cfg.openai_api_key: - print( - Fore.RED - + "Please set your OpenAI API key in .env or as an environment variable." 
- ) - print("You can get your key from https://platform.openai.com/account/api-keys") - exit(1) diff --git a/spaces/KevinQHLin/UniVTG/run_on_video/preprocessing.py b/spaces/KevinQHLin/UniVTG/run_on_video/preprocessing.py deleted file mode 100644 index 93b3e8112b299c93667fb433ac683fa7f46e0fda..0000000000000000000000000000000000000000 --- a/spaces/KevinQHLin/UniVTG/run_on_video/preprocessing.py +++ /dev/null @@ -1,25 +0,0 @@ -import torch as th - - -class Normalize(object): - - def __init__(self, mean, std): - self.mean = th.FloatTensor(mean).view(1, 3, 1, 1) - self.std = th.FloatTensor(std).view(1, 3, 1, 1) - - def __call__(self, tensor): - tensor = (tensor - self.mean) / (self.std + 1e-8) - return tensor - - -class Preprocessing(object): - - def __init__(self): - self.norm = Normalize( - mean=[0.48145466, 0.4578275, 0.40821073], - std=[0.26862954, 0.26130258, 0.27577711]) - - def __call__(self, tensor): - tensor = tensor / 255.0 - tensor = self.norm(tensor) - return tensor diff --git a/spaces/Lamai/LAMAIGPT/autogpt/utils.py b/spaces/Lamai/LAMAIGPT/autogpt/utils.py deleted file mode 100644 index e93d5ac740097ee144d1809aea31c0f7fb242fa5..0000000000000000000000000000000000000000 --- a/spaces/Lamai/LAMAIGPT/autogpt/utils.py +++ /dev/null @@ -1,77 +0,0 @@ -import os - -import requests -import yaml -from colorama import Fore -from git import Repo - - -def clean_input(prompt: str = ""): - try: - return input(prompt) - except KeyboardInterrupt: - print("You interrupted Auto-GPT") - print("Quitting...") - exit(0) - - -def validate_yaml_file(file: str): - try: - with open(file, encoding="utf-8") as fp: - yaml.load(fp.read(), Loader=yaml.FullLoader) - except FileNotFoundError: - return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found") - except yaml.YAMLError as e: - return ( - False, - f"There was an issue while trying to read with your AI Settings file: {e}", - ) - - return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!") - - -def readable_file_size(size, decimal_places=2): - """Converts the given size in bytes to a readable format. 
- Args: - size: Size in bytes - decimal_places (int): Number of decimal places to display - """ - for unit in ["B", "KB", "MB", "GB", "TB"]: - if size < 1024.0: - break - size /= 1024.0 - return f"{size:.{decimal_places}f} {unit}" - - -def get_bulletin_from_web() -> str: - try: - response = requests.get( - "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md" - ) - if response.status_code == 200: - return response.text - except: - return "" - - -def get_current_git_branch() -> str: - try: - repo = Repo(search_parent_directories=True) - branch = repo.active_branch - return branch.name - except: - return "" - - -def get_latest_bulletin() -> str: - exists = os.path.exists("CURRENT_BULLETIN.md") - current_bulletin = "" - if exists: - current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read() - new_bulletin = get_bulletin_from_web() - is_new_news = new_bulletin != current_bulletin - - if new_bulletin and is_new_news: - open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin) - return f" {Fore.RED}::UPDATED:: {Fore.CYAN}{new_bulletin}{Fore.RESET}" - return current_bulletin diff --git a/spaces/LinkSoul/LLaSM/static/css/fontawesome.all.min.css b/spaces/LinkSoul/LLaSM/static/css/fontawesome.all.min.css deleted file mode 100644 index 656a50745f7224b3eca827869677851c705b26c9..0000000000000000000000000000000000000000 --- a/spaces/LinkSoul/LLaSM/static/css/fontawesome.all.min.css +++ /dev/null @@ -1,5 +0,0 @@ -/*! - * Font Awesome Free 5.15.1 by @fontawesome - https://fontawesome.com - * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) - */ -.fa,.fab,.fad,.fal,.far,.fas{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;display:inline-block;font-style:normal;font-variant:normal;text-rendering:auto;line-height:1}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-.0667em}.fa-xs{font-size:.75em}.fa-sm{font-size:.875em}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:2.5em;padding-left:0}.fa-ul>li{position:relative}.fa-li{left:-2em;position:absolute;text-align:center;width:2em;line-height:inherit}.fa-border{border:.08em solid #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.fab.fa-pull-left,.fal.fa-pull-left,.far.fa-pull-left,.fas.fa-pull-left{margin-right:.3em}.fa.fa-pull-right,.fab.fa-pull-right,.fal.fa-pull-right,.far.fa-pull-right,.fas.fa-pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-webkit-transform:scaleY(-1);transform:scaleY(-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical,.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)"}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1);transform:scale(-1)}:root .fa-flip-both,:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{-webkit-filter:none;filter:none}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-500px:before{content:"\f26e"}.fa-accessible-icon:before{content:"\f368"}.fa-accusoft:before{content:"\f369"}.fa-acquisitions-incorporated:before{content:"\f6af"}.fa-ad:before{content:"\f641"}.fa-address-book:before{content:"\f2b9"}.fa-address-card:before{content:"\f2bb"}.fa-adjust:before{content:"\f042"}.fa-adn:before{content:"\f170"}.fa-adversal:before{content:"\f36a"}.fa-affiliatetheme:before{content:"\f36b"}.fa-air-freshener:before{content:"\f5d0"}.fa-airbnb:before{content:"\f834"}.fa-algolia:before{content:"\f36c"}.fa-align-center:before{content:"\f037"}.fa-align-justify:before{content:"\f039"}.fa-align-left:before{content:"\f036"}.fa-align-right:before{content:"\f038"}.fa-alipay:before{content:"\f642"}.fa-allergies:before{content:"\f461"}.fa-amazon:before{content:"\f270"}.fa-amazon-pay:before{content:"\f42c"}.fa-ambulance:before{content:"\f0f9"}.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-amilia:before{content:"\f36d"}.fa-anchor:before{content:"\f13d"}.fa-android:before{content:"\f17b"}.fa-angellist:before{content:"\f209"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-down:before{content:"\f107"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angry:before{content:"\f556"}.fa-angrycreative:before{content:"\f36e"}.fa-angular:before{content:"\f420"}.fa-ankh:before{content:"\f644"}.fa-app-store:before{content:"\f36f"}.fa-app-store-ios:before{content:"\f370"}.fa-apper:before{content:"\f371"}.fa-apple:before{content:"\f179"}.fa-apple-alt:before{content:"\f5d1"}.fa-apple-pay:before{content:"\f415"}.fa-archive:before{content:"\f187"}.fa-archway:before{content:"\f557"}.fa-arrow-alt-circle-down:before{content:"\f358"}.fa-arrow-alt-circle-left:before{content:"\f359"}.fa-arrow-alt-circle-right:before{content:"\f35a"}.fa-arrow-alt-circle-up:before{content:"\f35b"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-arrow-circle-left:before{cont
ent:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-down:before{content:"\f063"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrows-alt:before{content:"\f0b2"}.fa-arrows-alt-h:before{content:"\f337"}.fa-arrows-alt-v:before{content:"\f338"}.fa-artstation:before{content:"\f77a"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asterisk:before{content:"\f069"}.fa-asymmetrik:before{content:"\f372"}.fa-at:before{content:"\f1fa"}.fa-atlas:before{content:"\f558"}.fa-atlassian:before{content:"\f77b"}.fa-atom:before{content:"\f5d2"}.fa-audible:before{content:"\f373"}.fa-audio-description:before{content:"\f29e"}.fa-autoprefixer:before{content:"\f41c"}.fa-avianex:before{content:"\f374"}.fa-aviato:before{content:"\f421"}.fa-award:before{content:"\f559"}.fa-aws:before{content:"\f375"}.fa-baby:before{content:"\f77c"}.fa-baby-carriage:before{content:"\f77d"}.fa-backspace:before{content:"\f55a"}.fa-backward:before{content:"\f04a"}.fa-bacon:before{content:"\f7e5"}.fa-bacteria:before{content:"\e059"}.fa-bacterium:before{content:"\e05a"}.fa-bahai:before{content:"\f666"}.fa-balance-scale:before{content:"\f24e"}.fa-balance-scale-left:before{content:"\f515"}.fa-balance-scale-right:before{content:"\f516"}.fa-ban:before{content:"\f05e"}.fa-band-aid:before{content:"\f462"}.fa-bandcamp:before{content:"\f2d5"}.fa-barcode:before{content:"\f02a"}.fa-bars:before{content:"\f0c9"}.fa-baseball-ball:before{content:"\f433"}.fa-basketball-ball:before{content:"\f434"}.fa-bath:before{content:"\f2cd"}.fa-battery-empty:before{content:"\f244"}.fa-battery-full:before{content:"\f240"}.fa-battery-half:before{content:"\f242"}.fa-battery-quarter:before{content:"\f243"}.fa-battery-three-quarters:before{content:"\f241"}.fa-battle-net:before{content:"\f835"}.fa-bed:before{content:"\f236"}.fa-beer:before{content:"\f0fc"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-bell:before{content:"\f0f3"}.fa-bell-slash:before{content:"\f1f6"}.fa-bezier-curve:before{content:"\f55b"}.fa-bible:before{content:"\f647"}.fa-bicycle:before{content:"\f206"}.fa-biking:before{content:"\f84a"}.fa-bimobject:before{content:"\f378"}.fa-binoculars:before{content:"\f1e5"}.fa-biohazard:before{content:"\f780"}.fa-birthday-cake:before{content:"\f1fd"}.fa-bitbucket:before{content:"\f171"}.fa-bitcoin:before{content:"\f379"}.fa-bity:before{content:"\f37a"}.fa-black-tie:before{content:"\f27e"}.fa-blackberry:before{content:"\f37b"}.fa-blender:before{content:"\f517"}.fa-blender-phone:before{content:"\f6b6"}.fa-blind:before{content:"\f29d"}.fa-blog:before{content:"\f781"}.fa-blogger:before{content:"\f37c"}.fa-blogger-b:before{content:"\f37d"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-bold:before{content:"\f032"}.fa-bolt:before{content:"\f0e7"}.fa-bomb:before{content:"\f1e2"}.fa-bone:before{content:"\f5d7"}.fa-bong:before{content:"\f55c"}.fa-book:before{content:"\f02d"}.fa-book-dead:before{content:"\f6b7"}.fa-book-medical:before{content:"\f7e6"}.fa-book-open:before{content:"\f518"}.fa-book-reader:before{content:"\f5da"}.fa-bookmark:before{content:"\f02e"}.fa-bootstrap:before{content:"\f836"}.fa-border-all:before{content:"\f84c"}.fa-border-none:before{content:"\f850"}.fa-border-style:before{content:"\f853"}.fa-bowling-ball:before{content:"\f436"}.fa-box:before{content:"\f466"}.fa-box-open:before{content:"\f49e"}.fa-box-tissue:before{content:"\e05b"}.fa-boxes:before{conten
t:"\f468"}.fa-braille:before{content:"\f2a1"}.fa-brain:before{content:"\f5dc"}.fa-bread-slice:before{content:"\f7ec"}.fa-briefcase:before{content:"\f0b1"}.fa-briefcase-medical:before{content:"\f469"}.fa-broadcast-tower:before{content:"\f519"}.fa-broom:before{content:"\f51a"}.fa-brush:before{content:"\f55d"}.fa-btc:before{content:"\f15a"}.fa-buffer:before{content:"\f837"}.fa-bug:before{content:"\f188"}.fa-building:before{content:"\f1ad"}.fa-bullhorn:before{content:"\f0a1"}.fa-bullseye:before{content:"\f140"}.fa-burn:before{content:"\f46a"}.fa-buromobelexperte:before{content:"\f37f"}.fa-bus:before{content:"\f207"}.fa-bus-alt:before{content:"\f55e"}.fa-business-time:before{content:"\f64a"}.fa-buy-n-large:before{content:"\f8a6"}.fa-buysellads:before{content:"\f20d"}.fa-calculator:before{content:"\f1ec"}.fa-calendar:before{content:"\f133"}.fa-calendar-alt:before{content:"\f073"}.fa-calendar-check:before{content:"\f274"}.fa-calendar-day:before{content:"\f783"}.fa-calendar-minus:before{content:"\f272"}.fa-calendar-plus:before{content:"\f271"}.fa-calendar-times:before{content:"\f273"}.fa-calendar-week:before{content:"\f784"}.fa-camera:before{content:"\f030"}.fa-camera-retro:before{content:"\f083"}.fa-campground:before{content:"\f6bb"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-candy-cane:before{content:"\f786"}.fa-cannabis:before{content:"\f55f"}.fa-capsules:before{content:"\f46b"}.fa-car:before{content:"\f1b9"}.fa-car-alt:before{content:"\f5de"}.fa-car-battery:before{content:"\f5df"}.fa-car-crash:before{content:"\f5e1"}.fa-car-side:before{content:"\f5e4"}.fa-caravan:before{content:"\f8ff"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-caret-square-down:before{content:"\f150"}.fa-caret-square-left:before{content:"\f191"}.fa-caret-square-right:before{content:"\f152"}.fa-caret-square-up:before{content:"\f151"}.fa-caret-up:before{content:"\f0d8"}.fa-carrot:before{content:"\f787"}.fa-cart-arrow-down:before{content:"\f218"}.fa-cart-plus:before{content:"\f217"}.fa-cash-register:before{content:"\f788"}.fa-cat:before{content:"\f6be"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-apple-pay:before{content:"\f416"}.fa-cc-diners-club:before{content:"\f24c"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-cc-visa:before{content:"\f1f0"}.fa-centercode:before{content:"\f380"}.fa-centos:before{content:"\f789"}.fa-certificate:before{content:"\f0a3"}.fa-chair:before{content:"\f6c0"}.fa-chalkboard:before{content:"\f51b"}.fa-chalkboard-teacher:before{content:"\f51c"}.fa-charging-station:before{content:"\f5e7"}.fa-chart-area:before{content:"\f1fe"}.fa-chart-bar:before{content:"\f080"}.fa-chart-line:before{content:"\f201"}.fa-chart-pie:before{content:"\f200"}.fa-check:before{content:"\f00c"}.fa-check-circle:before{content:"\f058"}.fa-check-double:before{content:"\f560"}.fa-check-square:before{content:"\f14a"}.fa-cheese:before{content:"\f7ef"}.fa-chess:before{content:"\f439"}.fa-chess-bishop:before{content:"\f43a"}.fa-chess-board:before{content:"\f43c"}.fa-chess-king:before{content:"\f43f"}.fa-chess-knight:before{content:"\f441"}.fa-chess-pawn:before{content:"\f443"}.fa-chess-queen:before{content:"\f445"}.fa-chess-rook:before{content:"\f447"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{co
ntent:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-down:before{content:"\f078"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-chevron-up:before{content:"\f077"}.fa-child:before{content:"\f1ae"}.fa-chrome:before{content:"\f268"}.fa-chromecast:before{content:"\f838"}.fa-church:before{content:"\f51d"}.fa-circle:before{content:"\f111"}.fa-circle-notch:before{content:"\f1ce"}.fa-city:before{content:"\f64f"}.fa-clinic-medical:before{content:"\f7f2"}.fa-clipboard:before{content:"\f328"}.fa-clipboard-check:before{content:"\f46c"}.fa-clipboard-list:before{content:"\f46d"}.fa-clock:before{content:"\f017"}.fa-clone:before{content:"\f24d"}.fa-closed-captioning:before{content:"\f20a"}.fa-cloud:before{content:"\f0c2"}.fa-cloud-download-alt:before{content:"\f381"}.fa-cloud-meatball:before{content:"\f73b"}.fa-cloud-moon:before{content:"\f6c3"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-cloud-rain:before{content:"\f73d"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-cloud-sun:before{content:"\f6c4"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-cloud-upload-alt:before{content:"\f382"}.fa-cloudflare:before{content:"\e07d"}.fa-cloudscale:before{content:"\f383"}.fa-cloudsmith:before{content:"\f384"}.fa-cloudversify:before{content:"\f385"}.fa-cocktail:before{content:"\f561"}.fa-code:before{content:"\f121"}.fa-code-branch:before{content:"\f126"}.fa-codepen:before{content:"\f1cb"}.fa-codiepie:before{content:"\f284"}.fa-coffee:before{content:"\f0f4"}.fa-cog:before{content:"\f013"}.fa-cogs:before{content:"\f085"}.fa-coins:before{content:"\f51e"}.fa-columns:before{content:"\f0db"}.fa-comment:before{content:"\f075"}.fa-comment-alt:before{content:"\f27a"}.fa-comment-dollar:before{content:"\f651"}.fa-comment-dots:before{content:"\f4ad"}.fa-comment-medical:before{content:"\f7f5"}.fa-comment-slash:before{content:"\f4b3"}.fa-comments:before{content:"\f086"}.fa-comments-dollar:before{content:"\f653"}.fa-compact-disc:before{content:"\f51f"}.fa-compass:before{content:"\f14e"}.fa-compress:before{content:"\f066"}.fa-compress-alt:before{content:"\f422"}.fa-compress-arrows-alt:before{content:"\f78c"}.fa-concierge-bell:before{content:"\f562"}.fa-confluence:before{content:"\f78d"}.fa-connectdevelop:before{content:"\f20e"}.fa-contao:before{content:"\f26d"}.fa-cookie:before{content:"\f563"}.fa-cookie-bite:before{content:"\f564"}.fa-copy:before{content:"\f0c5"}.fa-copyright:before{content:"\f1f9"}.fa-cotton-bureau:before{content:"\f89e"}.fa-couch:before{content:"\f4b8"}.fa-cpanel:before{content:"\f388"}.fa-creative-commons:before{content:"\f25e"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-creative-commons-share:before{content:"\f4f2"}.fa-creative-commons-zero:before{content:"\f4f3"}.fa-credit-card:before{content:"\f09d"}.fa-critical-role:before{content:"\f6c9"}.fa-crop:before{content:"\f125"}.fa-crop-alt:before{content:"\f565"}.fa-cross:before{content:"\f654"}.fa-crosshairs:before{content:"\f05b"}.fa-crow:before{content:"\f520"}.fa-crown:before{content:"\f521"}.fa-crutch:be
fore{content:"\f7f7"}.fa-css3:before{content:"\f13c"}.fa-css3-alt:before{content:"\f38b"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-cut:before{content:"\f0c4"}.fa-cuttlefish:before{content:"\f38c"}.fa-d-and-d:before{content:"\f38d"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-dailymotion:before{content:"\e052"}.fa-dashcube:before{content:"\f210"}.fa-database:before{content:"\f1c0"}.fa-deaf:before{content:"\f2a4"}.fa-deezer:before{content:"\e077"}.fa-delicious:before{content:"\f1a5"}.fa-democrat:before{content:"\f747"}.fa-deploydog:before{content:"\f38e"}.fa-deskpro:before{content:"\f38f"}.fa-desktop:before{content:"\f108"}.fa-dev:before{content:"\f6cc"}.fa-deviantart:before{content:"\f1bd"}.fa-dharmachakra:before{content:"\f655"}.fa-dhl:before{content:"\f790"}.fa-diagnoses:before{content:"\f470"}.fa-diaspora:before{content:"\f791"}.fa-dice:before{content:"\f522"}.fa-dice-d20:before{content:"\f6cf"}.fa-dice-d6:before{content:"\f6d1"}.fa-dice-five:before{content:"\f523"}.fa-dice-four:before{content:"\f524"}.fa-dice-one:before{content:"\f525"}.fa-dice-six:before{content:"\f526"}.fa-dice-three:before{content:"\f527"}.fa-dice-two:before{content:"\f528"}.fa-digg:before{content:"\f1a6"}.fa-digital-ocean:before{content:"\f391"}.fa-digital-tachograph:before{content:"\f566"}.fa-directions:before{content:"\f5eb"}.fa-discord:before{content:"\f392"}.fa-discourse:before{content:"\f393"}.fa-disease:before{content:"\f7fa"}.fa-divide:before{content:"\f529"}.fa-dizzy:before{content:"\f567"}.fa-dna:before{content:"\f471"}.fa-dochub:before{content:"\f394"}.fa-docker:before{content:"\f395"}.fa-dog:before{content:"\f6d3"}.fa-dollar-sign:before{content:"\f155"}.fa-dolly:before{content:"\f472"}.fa-dolly-flatbed:before{content:"\f474"}.fa-donate:before{content:"\f4b9"}.fa-door-closed:before{content:"\f52a"}.fa-door-open:before{content:"\f52b"}.fa-dot-circle:before{content:"\f192"}.fa-dove:before{content:"\f4ba"}.fa-download:before{content:"\f019"}.fa-draft2digital:before{content:"\f396"}.fa-drafting-compass:before{content:"\f568"}.fa-dragon:before{content:"\f6d5"}.fa-draw-polygon:before{content:"\f5ee"}.fa-dribbble:before{content:"\f17d"}.fa-dribbble-square:before{content:"\f397"}.fa-dropbox:before{content:"\f16b"}.fa-drum:before{content:"\f569"}.fa-drum-steelpan:before{content:"\f56a"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-drupal:before{content:"\f1a9"}.fa-dumbbell:before{content:"\f44b"}.fa-dumpster:before{content:"\f793"}.fa-dumpster-fire:before{content:"\f794"}.fa-dungeon:before{content:"\f6d9"}.fa-dyalog:before{content:"\f399"}.fa-earlybirds:before{content:"\f39a"}.fa-ebay:before{content:"\f4f4"}.fa-edge:before{content:"\f282"}.fa-edge-legacy:before{content:"\e078"}.fa-edit:before{content:"\f044"}.fa-egg:before{content:"\f7fb"}.fa-eject:before{content:"\f052"}.fa-elementor:before{content:"\f430"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-ello:before{content:"\f5f1"}.fa-ember:before{content:"\f423"}.fa-empire:before{content:"\f1d1"}.fa-envelope:before{content:"\f0e0"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-text:before{content:"\f658"}.fa-envelope-square:before{content:"\f199"}.fa-envira:before{content:"\f299"}.fa-equals:before{content:"\f52c"}.fa-eraser:before{content:"\f12d"}.fa-erlang:before{content:"\f39d"}.fa-ethereum:before{content:"\f42e"}.fa-ethernet:before{content:"\f796"}.fa-etsy:before{content:"\f2d7"}.fa-euro-sign:before{content:"\f153"}.fa-evernote:before{content:"\f839"}.fa-exchange-alt:before{content:"\f362"}
.fa-exclamation:before{content:"\f12a"}.fa-exclamation-circle:before{content:"\f06a"}.fa-exclamation-triangle:before{content:"\f071"}.fa-expand:before{content:"\f065"}.fa-expand-alt:before{content:"\f424"}.fa-expand-arrows-alt:before{content:"\f31e"}.fa-expeditedssl:before{content:"\f23e"}.fa-external-link-alt:before{content:"\f35d"}.fa-external-link-square-alt:before{content:"\f360"}.fa-eye:before{content:"\f06e"}.fa-eye-dropper:before{content:"\f1fb"}.fa-eye-slash:before{content:"\f070"}.fa-facebook:before{content:"\f09a"}.fa-facebook-f:before{content:"\f39e"}.fa-facebook-messenger:before{content:"\f39f"}.fa-facebook-square:before{content:"\f082"}.fa-fan:before{content:"\f863"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-fast-backward:before{content:"\f049"}.fa-fast-forward:before{content:"\f050"}.fa-faucet:before{content:"\e005"}.fa-fax:before{content:"\f1ac"}.fa-feather:before{content:"\f52d"}.fa-feather-alt:before{content:"\f56b"}.fa-fedex:before{content:"\f797"}.fa-fedora:before{content:"\f798"}.fa-female:before{content:"\f182"}.fa-fighter-jet:before{content:"\f0fb"}.fa-figma:before{content:"\f799"}.fa-file:before{content:"\f15b"}.fa-file-alt:before{content:"\f15c"}.fa-file-archive:before{content:"\f1c6"}.fa-file-audio:before{content:"\f1c7"}.fa-file-code:before{content:"\f1c9"}.fa-file-contract:before{content:"\f56c"}.fa-file-csv:before{content:"\f6dd"}.fa-file-download:before{content:"\f56d"}.fa-file-excel:before{content:"\f1c3"}.fa-file-export:before{content:"\f56e"}.fa-file-image:before{content:"\f1c5"}.fa-file-import:before{content:"\f56f"}.fa-file-invoice:before{content:"\f570"}.fa-file-invoice-dollar:before{content:"\f571"}.fa-file-medical:before{content:"\f477"}.fa-file-medical-alt:before{content:"\f478"}.fa-file-pdf:before{content:"\f1c1"}.fa-file-powerpoint:before{content:"\f1c4"}.fa-file-prescription:before{content:"\f572"}.fa-file-signature:before{content:"\f573"}.fa-file-upload:before{content:"\f574"}.fa-file-video:before{content:"\f1c8"}.fa-file-word:before{content:"\f1c2"}.fa-fill:before{content:"\f575"}.fa-fill-drip:before{content:"\f576"}.fa-film:before{content:"\f008"}.fa-filter:before{content:"\f0b0"}.fa-fingerprint:before{content:"\f577"}.fa-fire:before{content:"\f06d"}.fa-fire-alt:before{content:"\f7e4"}.fa-fire-extinguisher:before{content:"\f134"}.fa-firefox:before{content:"\f269"}.fa-firefox-browser:before{content:"\e007"}.fa-first-aid:before{content:"\f479"}.fa-first-order:before{content:"\f2b0"}.fa-first-order-alt:before{content:"\f50a"}.fa-firstdraft:before{content:"\f3a1"}.fa-fish:before{content:"\f578"}.fa-fist-raised:before{content:"\f6de"}.fa-flag:before{content:"\f024"}.fa-flag-checkered:before{content:"\f11e"}.fa-flag-usa:before{content:"\f74d"}.fa-flask:before{content:"\f0c3"}.fa-flickr:before{content:"\f16e"}.fa-flipboard:before{content:"\f44d"}.fa-flushed:before{content:"\f579"}.fa-fly:before{content:"\f417"}.fa-folder:before{content:"\f07b"}.fa-folder-minus:before{content:"\f65d"}.fa-folder-open:before{content:"\f07c"}.fa-folder-plus:before{content:"\f65e"}.fa-font:before{content:"\f031"}.fa-font-awesome:before{content:"\f2b4"}.fa-font-awesome-alt:before{content:"\f35c"}.fa-font-awesome-flag:before{content:"\f425"}.fa-font-awesome-logo-full:before{content:"\f4e6"}.fa-fonticons:before{content:"\f280"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-football-ball:before{content:"\f44e"}.fa-fort-awesome:before{content:"\f286"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-forumbee:before{content:"\f211"}.fa-forward:before{content:"\f04e"}.fa-foursqu
are:before{content:"\f180"}.fa-free-code-camp:before{content:"\f2c5"}.fa-freebsd:before{content:"\f3a4"}.fa-frog:before{content:"\f52e"}.fa-frown:before{content:"\f119"}.fa-frown-open:before{content:"\f57a"}.fa-fulcrum:before{content:"\f50b"}.fa-funnel-dollar:before{content:"\f662"}.fa-futbol:before{content:"\f1e3"}.fa-galactic-republic:before{content:"\f50c"}.fa-galactic-senate:before{content:"\f50d"}.fa-gamepad:before{content:"\f11b"}.fa-gas-pump:before{content:"\f52f"}.fa-gavel:before{content:"\f0e3"}.fa-gem:before{content:"\f3a5"}.fa-genderless:before{content:"\f22d"}.fa-get-pocket:before{content:"\f265"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-ghost:before{content:"\f6e2"}.fa-gift:before{content:"\f06b"}.fa-gifts:before{content:"\f79c"}.fa-git:before{content:"\f1d3"}.fa-git-alt:before{content:"\f841"}.fa-git-square:before{content:"\f1d2"}.fa-github:before{content:"\f09b"}.fa-github-alt:before{content:"\f113"}.fa-github-square:before{content:"\f092"}.fa-gitkraken:before{content:"\f3a6"}.fa-gitlab:before{content:"\f296"}.fa-gitter:before{content:"\f426"}.fa-glass-cheers:before{content:"\f79f"}.fa-glass-martini:before{content:"\f000"}.fa-glass-martini-alt:before{content:"\f57b"}.fa-glass-whiskey:before{content:"\f7a0"}.fa-glasses:before{content:"\f530"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-globe:before{content:"\f0ac"}.fa-globe-africa:before{content:"\f57c"}.fa-globe-americas:before{content:"\f57d"}.fa-globe-asia:before{content:"\f57e"}.fa-globe-europe:before{content:"\f7a2"}.fa-gofore:before{content:"\f3a7"}.fa-golf-ball:before{content:"\f450"}.fa-goodreads:before{content:"\f3a8"}.fa-goodreads-g:before{content:"\f3a9"}.fa-google:before{content:"\f1a0"}.fa-google-drive:before{content:"\f3aa"}.fa-google-pay:before{content:"\e079"}.fa-google-play:before{content:"\f3ab"}.fa-google-plus:before{content:"\f2b3"}.fa-google-plus-g:before{content:"\f0d5"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-wallet:before{content:"\f1ee"}.fa-gopuram:before{content:"\f664"}.fa-graduation-cap:before{content:"\f19d"}.fa-gratipay:before{content:"\f184"}.fa-grav:before{content:"\f2d6"}.fa-greater-than:before{content:"\f531"}.fa-greater-than-equal:before{content:"\f532"}.fa-grimace:before{content:"\f57f"}.fa-grin:before{content:"\f580"}.fa-grin-alt:before{content:"\f581"}.fa-grin-beam:before{content:"\f582"}.fa-grin-beam-sweat:before{content:"\f583"}.fa-grin-hearts:before{content:"\f584"}.fa-grin-squint:before{content:"\f585"}.fa-grin-squint-tears:before{content:"\f586"}.fa-grin-stars:before{content:"\f587"}.fa-grin-tears:before{content:"\f588"}.fa-grin-tongue:before{content:"\f589"}.fa-grin-tongue-squint:before{content:"\f58a"}.fa-grin-tongue-wink:before{content:"\f58b"}.fa-grin-wink:before{content:"\f58c"}.fa-grip-horizontal:before{content:"\f58d"}.fa-grip-lines:before{content:"\f7a4"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-grip-vertical:before{content:"\f58e"}.fa-gripfire:before{content:"\f3ac"}.fa-grunt:before{content:"\f3ad"}.fa-guilded:before{content:"\e07e"}.fa-guitar:before{content:"\f7a6"}.fa-gulp:before{content:"\f3ae"}.fa-h-square:before{content:"\f0fd"}.fa-hacker-news:before{content:"\f1d4"}.fa-hacker-news-square:before{content:"\f3af"}.fa-hackerrank:before{content:"\f5f7"}.fa-hamburger:before{content:"\f805"}.fa-hammer:before{content:"\f6e3"}.fa-hamsa:before{content:"\f665"}.fa-hand-holding:before{content:"\f4bd"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-hand-holding-medical:before{content:"\e05c"}.fa-hand-
holding-usd:before{content:"\f4c0"}.fa-hand-holding-water:before{content:"\f4c1"}.fa-hand-lizard:before{content:"\f258"}.fa-hand-middle-finger:before{content:"\f806"}.fa-hand-paper:before{content:"\f256"}.fa-hand-peace:before{content:"\f25b"}.fa-hand-point-down:before{content:"\f0a7"}.fa-hand-point-left:before{content:"\f0a5"}.fa-hand-point-right:before{content:"\f0a4"}.fa-hand-point-up:before{content:"\f0a6"}.fa-hand-pointer:before{content:"\f25a"}.fa-hand-rock:before{content:"\f255"}.fa-hand-scissors:before{content:"\f257"}.fa-hand-sparkles:before{content:"\e05d"}.fa-hand-spock:before{content:"\f259"}.fa-hands:before{content:"\f4c2"}.fa-hands-helping:before{content:"\f4c4"}.fa-hands-wash:before{content:"\e05e"}.fa-handshake:before{content:"\f2b5"}.fa-handshake-alt-slash:before{content:"\e05f"}.fa-handshake-slash:before{content:"\e060"}.fa-hanukiah:before{content:"\f6e6"}.fa-hard-hat:before{content:"\f807"}.fa-hashtag:before{content:"\f292"}.fa-hat-cowboy:before{content:"\f8c0"}.fa-hat-cowboy-side:before{content:"\f8c1"}.fa-hat-wizard:before{content:"\f6e8"}.fa-hdd:before{content:"\f0a0"}.fa-head-side-cough:before{content:"\e061"}.fa-head-side-cough-slash:before{content:"\e062"}.fa-head-side-mask:before{content:"\e063"}.fa-head-side-virus:before{content:"\e064"}.fa-heading:before{content:"\f1dc"}.fa-headphones:before{content:"\f025"}.fa-headphones-alt:before{content:"\f58f"}.fa-headset:before{content:"\f590"}.fa-heart:before{content:"\f004"}.fa-heart-broken:before{content:"\f7a9"}.fa-heartbeat:before{content:"\f21e"}.fa-helicopter:before{content:"\f533"}.fa-highlighter:before{content:"\f591"}.fa-hiking:before{content:"\f6ec"}.fa-hippo:before{content:"\f6ed"}.fa-hips:before{content:"\f452"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-history:before{content:"\f1da"}.fa-hive:before{content:"\e07f"}.fa-hockey-puck:before{content:"\f453"}.fa-holly-berry:before{content:"\f7aa"}.fa-home:before{content:"\f015"}.fa-hooli:before{content:"\f427"}.fa-hornbill:before{content:"\f592"}.fa-horse:before{content:"\f6f0"}.fa-horse-head:before{content:"\f7ab"}.fa-hospital:before{content:"\f0f8"}.fa-hospital-alt:before{content:"\f47d"}.fa-hospital-symbol:before{content:"\f47e"}.fa-hospital-user:before{content:"\f80d"}.fa-hot-tub:before{content:"\f593"}.fa-hotdog:before{content:"\f80f"}.fa-hotel:before{content:"\f594"}.fa-hotjar:before{content:"\f3b1"}.fa-hourglass:before{content:"\f254"}.fa-hourglass-end:before{content:"\f253"}.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-start:before{content:"\f251"}.fa-house-damage:before{content:"\f6f1"}.fa-house-user:before{content:"\e065"}.fa-houzz:before{content:"\f27c"}.fa-hryvnia:before{content:"\f6f2"}.fa-html5:before{content:"\f13b"}.fa-hubspot:before{content:"\f3b2"}.fa-i-cursor:before{content:"\f246"}.fa-ice-cream:before{content:"\f810"}.fa-icicles:before{content:"\f7ad"}.fa-icons:before{content:"\f86d"}.fa-id-badge:before{content:"\f2c1"}.fa-id-card:before{content:"\f2c2"}.fa-id-card-alt:before{content:"\f47f"}.fa-ideal:before{content:"\e013"}.fa-igloo:before{content:"\f7ae"}.fa-image:before{content:"\f03e"}.fa-images:before{content:"\f302"}.fa-imdb:before{content:"\f2d8"}.fa-inbox:before{content:"\f01c"}.fa-indent:before{content:"\f03c"}.fa-industry:before{content:"\f275"}.fa-infinity:before{content:"\f534"}.fa-info:before{content:"\f129"}.fa-info-circle:before{content:"\f05a"}.fa-innosoft:before{content:"\e080"}.fa-instagram:before{content:"\f16d"}.fa-instagram-square:before{content:"\e055"}.fa-instalod:before{content:"\e081"}.fa-intercom:before{conten
t:"\f7af"}.fa-internet-explorer:before{content:"\f26b"}.fa-invision:before{content:"\f7b0"}.fa-ioxhost:before{content:"\f208"}.fa-italic:before{content:"\f033"}.fa-itch-io:before{content:"\f83a"}.fa-itunes:before{content:"\f3b4"}.fa-itunes-note:before{content:"\f3b5"}.fa-java:before{content:"\f4e4"}.fa-jedi:before{content:"\f669"}.fa-jedi-order:before{content:"\f50e"}.fa-jenkins:before{content:"\f3b6"}.fa-jira:before{content:"\f7b1"}.fa-joget:before{content:"\f3b7"}.fa-joint:before{content:"\f595"}.fa-joomla:before{content:"\f1aa"}.fa-journal-whills:before{content:"\f66a"}.fa-js:before{content:"\f3b8"}.fa-js-square:before{content:"\f3b9"}.fa-jsfiddle:before{content:"\f1cc"}.fa-kaaba:before{content:"\f66b"}.fa-kaggle:before{content:"\f5fa"}.fa-key:before{content:"\f084"}.fa-keybase:before{content:"\f4f5"}.fa-keyboard:before{content:"\f11c"}.fa-keycdn:before{content:"\f3ba"}.fa-khanda:before{content:"\f66d"}.fa-kickstarter:before{content:"\f3bb"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-kiss:before{content:"\f596"}.fa-kiss-beam:before{content:"\f597"}.fa-kiss-wink-heart:before{content:"\f598"}.fa-kiwi-bird:before{content:"\f535"}.fa-korvue:before{content:"\f42f"}.fa-landmark:before{content:"\f66f"}.fa-language:before{content:"\f1ab"}.fa-laptop:before{content:"\f109"}.fa-laptop-code:before{content:"\f5fc"}.fa-laptop-house:before{content:"\e066"}.fa-laptop-medical:before{content:"\f812"}.fa-laravel:before{content:"\f3bd"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-laugh:before{content:"\f599"}.fa-laugh-beam:before{content:"\f59a"}.fa-laugh-squint:before{content:"\f59b"}.fa-laugh-wink:before{content:"\f59c"}.fa-layer-group:before{content:"\f5fd"}.fa-leaf:before{content:"\f06c"}.fa-leanpub:before{content:"\f212"}.fa-lemon:before{content:"\f094"}.fa-less:before{content:"\f41d"}.fa-less-than:before{content:"\f536"}.fa-less-than-equal:before{content:"\f537"}.fa-level-down-alt:before{content:"\f3be"}.fa-level-up-alt:before{content:"\f3bf"}.fa-life-ring:before{content:"\f1cd"}.fa-lightbulb:before{content:"\f0eb"}.fa-line:before{content:"\f3c0"}.fa-link:before{content:"\f0c1"}.fa-linkedin:before{content:"\f08c"}.fa-linkedin-in:before{content:"\f0e1"}.fa-linode:before{content:"\f2b8"}.fa-linux:before{content:"\f17c"}.fa-lira-sign:before{content:"\f195"}.fa-list:before{content:"\f03a"}.fa-list-alt:before{content:"\f022"}.fa-list-ol:before{content:"\f0cb"}.fa-list-ul:before{content:"\f0ca"}.fa-location-arrow:before{content:"\f124"}.fa-lock:before{content:"\f023"}.fa-lock-open:before{content:"\f3c1"}.fa-long-arrow-alt-down:before{content:"\f309"}.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-long-arrow-alt-right:before{content:"\f30b"}.fa-long-arrow-alt-up:before{content:"\f30c"}.fa-low-vision:before{content:"\f2a8"}.fa-luggage-cart:before{content:"\f59d"}.fa-lungs:before{content:"\f604"}.fa-lungs-virus:before{content:"\e067"}.fa-lyft:before{content:"\f3c3"}.fa-magento:before{content:"\f3c4"}.fa-magic:before{content:"\f0d0"}.fa-magnet:before{content:"\f076"}.fa-mail-bulk:before{content:"\f674"}.fa-mailchimp:before{content:"\f59e"}.fa-male:before{content:"\f183"}.fa-mandalorian:before{content:"\f50f"}.fa-map:before{content:"\f279"}.fa-map-marked:before{content:"\f59f"}.fa-map-marked-alt:before{content:"\f5a0"}.fa-map-marker:before{content:"\f041"}.fa-map-marker-alt:before{content:"\f3c5"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-markdown:before{content:"\f60f"}.fa-marker:before{content:"\f5a1"}.fa-mars:before{content:"\f222"}.fa-mar
s-double:before{content:"\f227"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mask:before{content:"\f6fa"}.fa-mastodon:before{content:"\f4f6"}.fa-maxcdn:before{content:"\f136"}.fa-mdb:before{content:"\f8ca"}.fa-medal:before{content:"\f5a2"}.fa-medapps:before{content:"\f3c6"}.fa-medium:before{content:"\f23a"}.fa-medium-m:before{content:"\f3c7"}.fa-medkit:before{content:"\f0fa"}.fa-medrt:before{content:"\f3c8"}.fa-meetup:before{content:"\f2e0"}.fa-megaport:before{content:"\f5a3"}.fa-meh:before{content:"\f11a"}.fa-meh-blank:before{content:"\f5a4"}.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-memory:before{content:"\f538"}.fa-mendeley:before{content:"\f7b3"}.fa-menorah:before{content:"\f676"}.fa-mercury:before{content:"\f223"}.fa-meteor:before{content:"\f753"}.fa-microblog:before{content:"\e01a"}.fa-microchip:before{content:"\f2db"}.fa-microphone:before{content:"\f130"}.fa-microphone-alt:before{content:"\f3c9"}.fa-microphone-alt-slash:before{content:"\f539"}.fa-microphone-slash:before{content:"\f131"}.fa-microscope:before{content:"\f610"}.fa-microsoft:before{content:"\f3ca"}.fa-minus:before{content:"\f068"}.fa-minus-circle:before{content:"\f056"}.fa-minus-square:before{content:"\f146"}.fa-mitten:before{content:"\f7b5"}.fa-mix:before{content:"\f3cb"}.fa-mixcloud:before{content:"\f289"}.fa-mixer:before{content:"\e056"}.fa-mizuni:before{content:"\f3cc"}.fa-mobile:before{content:"\f10b"}.fa-mobile-alt:before{content:"\f3cd"}.fa-modx:before{content:"\f285"}.fa-monero:before{content:"\f3d0"}.fa-money-bill:before{content:"\f0d6"}.fa-money-bill-alt:before{content:"\f3d1"}.fa-money-bill-wave:before{content:"\f53a"}.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-money-check:before{content:"\f53c"}.fa-money-check-alt:before{content:"\f53d"}.fa-monument:before{content:"\f5a6"}.fa-moon:before{content:"\f186"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-mosque:before{content:"\f678"}.fa-motorcycle:before{content:"\f21c"}.fa-mountain:before{content:"\f6fc"}.fa-mouse:before{content:"\f8cc"}.fa-mouse-pointer:before{content:"\f245"}.fa-mug-hot:before{content:"\f7b6"}.fa-music:before{content:"\f001"}.fa-napster:before{content:"\f3d2"}.fa-neos:before{content:"\f612"}.fa-network-wired:before{content:"\f6ff"}.fa-neuter:before{content:"\f22c"}.fa-newspaper:before{content:"\f1ea"}.fa-nimblr:before{content:"\f5a8"}.fa-node:before{content:"\f419"}.fa-node-js:before{content:"\f3d3"}.fa-not-equal:before{content:"\f53e"}.fa-notes-medical:before{content:"\f481"}.fa-npm:before{content:"\f3d4"}.fa-ns8:before{content:"\f3d5"}.fa-nutritionix:before{content:"\f3d6"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-octopus-deploy:before{content:"\e082"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-oil-can:before{content:"\f613"}.fa-old-republic:before{content:"\f510"}.fa-om:before{content:"\f679"}.fa-opencart:before{content:"\f23d"}.fa-openid:before{content:"\f19b"}.fa-opera:before{content:"\f26a"}.fa-optin-monster:before{content:"\f23c"}.fa-orcid:before{content:"\f8d2"}.fa-osi:before{content:"\f41a"}.fa-otter:before{content:"\f700"}.fa-outdent:before{content:"\f03b"}.fa-page4:before{content:"\f3d7"}.fa-pagelines:before{content:"\f18c"}.fa-pager:before{content:"\f815"}.fa-paint-brush:before{content:"\f1fc"}.fa-paint-roller:before{content:"\f5aa"}.fa-palette:before{content:"\f53f"}.fa-palfed:before{content:"\f3d8"}.fa-pallet:before{content:"\f482"}.fa-paper-plane:bef
ore{content:"\f1d8"}.fa-paperclip:before{content:"\f0c6"}.fa-parachute-box:before{content:"\f4cd"}.fa-paragraph:before{content:"\f1dd"}.fa-parking:before{content:"\f540"}.fa-passport:before{content:"\f5ab"}.fa-pastafarianism:before{content:"\f67b"}.fa-paste:before{content:"\f0ea"}.fa-patreon:before{content:"\f3d9"}.fa-pause:before{content:"\f04c"}.fa-pause-circle:before{content:"\f28b"}.fa-paw:before{content:"\f1b0"}.fa-paypal:before{content:"\f1ed"}.fa-peace:before{content:"\f67c"}.fa-pen:before{content:"\f304"}.fa-pen-alt:before{content:"\f305"}.fa-pen-fancy:before{content:"\f5ac"}.fa-pen-nib:before{content:"\f5ad"}.fa-pen-square:before{content:"\f14b"}.fa-pencil-alt:before{content:"\f303"}.fa-pencil-ruler:before{content:"\f5ae"}.fa-penny-arcade:before{content:"\f704"}.fa-people-arrows:before{content:"\e068"}.fa-people-carry:before{content:"\f4ce"}.fa-pepper-hot:before{content:"\f816"}.fa-perbyte:before{content:"\e083"}.fa-percent:before{content:"\f295"}.fa-percentage:before{content:"\f541"}.fa-periscope:before{content:"\f3da"}.fa-person-booth:before{content:"\f756"}.fa-phabricator:before{content:"\f3db"}.fa-phoenix-framework:before{content:"\f3dc"}.fa-phoenix-squadron:before{content:"\f511"}.fa-phone:before{content:"\f095"}.fa-phone-alt:before{content:"\f879"}.fa-phone-slash:before{content:"\f3dd"}.fa-phone-square:before{content:"\f098"}.fa-phone-square-alt:before{content:"\f87b"}.fa-phone-volume:before{content:"\f2a0"}.fa-photo-video:before{content:"\f87c"}.fa-php:before{content:"\f457"}.fa-pied-piper:before{content:"\f2ae"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-square:before{content:"\e01e"}.fa-piggy-bank:before{content:"\f4d3"}.fa-pills:before{content:"\f484"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-p:before{content:"\f231"}.fa-pinterest-square:before{content:"\f0d3"}.fa-pizza-slice:before{content:"\f818"}.fa-place-of-worship:before{content:"\f67f"}.fa-plane:before{content:"\f072"}.fa-plane-arrival:before{content:"\f5af"}.fa-plane-departure:before{content:"\f5b0"}.fa-plane-slash:before{content:"\e069"}.fa-play:before{content:"\f04b"}.fa-play-circle:before{content:"\f144"}.fa-playstation:before{content:"\f3df"}.fa-plug:before{content:"\f1e6"}.fa-plus:before{content:"\f067"}.fa-plus-circle:before{content:"\f055"}.fa-plus-square:before{content:"\f0fe"}.fa-podcast:before{content:"\f2ce"}.fa-poll:before{content:"\f681"}.fa-poll-h:before{content:"\f682"}.fa-poo:before{content:"\f2fe"}.fa-poo-storm:before{content:"\f75a"}.fa-poop:before{content:"\f619"}.fa-portrait:before{content:"\f3e0"}.fa-pound-sign:before{content:"\f154"}.fa-power-off:before{content:"\f011"}.fa-pray:before{content:"\f683"}.fa-praying-hands:before{content:"\f684"}.fa-prescription:before{content:"\f5b1"}.fa-prescription-bottle:before{content:"\f485"}.fa-prescription-bottle-alt:before{content:"\f486"}.fa-print:before{content:"\f02f"}.fa-procedures:before{content:"\f487"}.fa-product-hunt:before{content:"\f288"}.fa-project-diagram:before{content:"\f542"}.fa-pump-medical:before{content:"\e06a"}.fa-pump-soap:before{content:"\e06b"}.fa-pushed:before{content:"\f3e1"}.fa-puzzle-piece:before{content:"\f12e"}.fa-python:before{content:"\f3e2"}.fa-qq:before{content:"\f1d6"}.fa-qrcode:before{content:"\f029"}.fa-question:before{content:"\f128"}.fa-question-circle:before{content:"\f059"}.fa-quidditch:before{content:"\f458"}.fa-quinscape:before{content:"\f459"}.fa-quora:before{content:"\f2c4"}.fa-quote-left:before{content:"\f10d"
}.fa-quote-right:before{content:"\f10e"}.fa-quran:before{content:"\f687"}.fa-r-project:before{content:"\f4f7"}.fa-radiation:before{content:"\f7b9"}.fa-radiation-alt:before{content:"\f7ba"}.fa-rainbow:before{content:"\f75b"}.fa-random:before{content:"\f074"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-ravelry:before{content:"\f2d9"}.fa-react:before{content:"\f41b"}.fa-reacteurope:before{content:"\f75d"}.fa-readme:before{content:"\f4d5"}.fa-rebel:before{content:"\f1d0"}.fa-receipt:before{content:"\f543"}.fa-record-vinyl:before{content:"\f8d9"}.fa-recycle:before{content:"\f1b8"}.fa-red-river:before{content:"\f3e3"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-alien:before{content:"\f281"}.fa-reddit-square:before{content:"\f1a2"}.fa-redhat:before{content:"\f7bc"}.fa-redo:before{content:"\f01e"}.fa-redo-alt:before{content:"\f2f9"}.fa-registered:before{content:"\f25d"}.fa-remove-format:before{content:"\f87d"}.fa-renren:before{content:"\f18b"}.fa-reply:before{content:"\f3e5"}.fa-reply-all:before{content:"\f122"}.fa-replyd:before{content:"\f3e6"}.fa-republican:before{content:"\f75e"}.fa-researchgate:before{content:"\f4f8"}.fa-resolving:before{content:"\f3e7"}.fa-restroom:before{content:"\f7bd"}.fa-retweet:before{content:"\f079"}.fa-rev:before{content:"\f5b2"}.fa-ribbon:before{content:"\f4d6"}.fa-ring:before{content:"\f70b"}.fa-road:before{content:"\f018"}.fa-robot:before{content:"\f544"}.fa-rocket:before{content:"\f135"}.fa-rocketchat:before{content:"\f3e8"}.fa-rockrms:before{content:"\f3e9"}.fa-route:before{content:"\f4d7"}.fa-rss:before{content:"\f09e"}.fa-rss-square:before{content:"\f143"}.fa-ruble-sign:before{content:"\f158"}.fa-ruler:before{content:"\f545"}.fa-ruler-combined:before{content:"\f546"}.fa-ruler-horizontal:before{content:"\f547"}.fa-ruler-vertical:before{content:"\f548"}.fa-running:before{content:"\f70c"}.fa-rupee-sign:before{content:"\f156"}.fa-rust:before{content:"\e07a"}.fa-sad-cry:before{content:"\f5b3"}.fa-sad-tear:before{content:"\f5b4"}.fa-safari:before{content:"\f267"}.fa-salesforce:before{content:"\f83b"}.fa-sass:before{content:"\f41e"}.fa-satellite:before{content:"\f7bf"}.fa-satellite-dish:before{content:"\f7c0"}.fa-save:before{content:"\f0c7"}.fa-schlix:before{content:"\f3ea"}.fa-school:before{content:"\f549"}.fa-screwdriver:before{content:"\f54a"}.fa-scribd:before{content:"\f28a"}.fa-scroll:before{content:"\f70e"}.fa-sd-card:before{content:"\f7c2"}.fa-search:before{content:"\f002"}.fa-search-dollar:before{content:"\f688"}.fa-search-location:before{content:"\f689"}.fa-search-minus:before{content:"\f010"}.fa-search-plus:before{content:"\f00e"}.fa-searchengin:before{content:"\f3eb"}.fa-seedling:before{content:"\f4d8"}.fa-sellcast:before{content:"\f2da"}.fa-sellsy:before{content:"\f213"}.fa-server:before{content:"\f233"}.fa-servicestack:before{content:"\f3ec"}.fa-shapes:before{content:"\f61f"}.fa-share:before{content:"\f064"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-share-square:before{content:"\f14d"}.fa-shekel-sign:before{content:"\f20b"}.fa-shield-alt:before{content:"\f3ed"}.fa-shield-virus:before{content:"\e06c"}.fa-ship:before{content:"\f21a"}.fa-shipping-fast:before{content:"\f48b"}.fa-shirtsinbulk:before{content:"\f214"}.fa-shoe-prints:before{content:"\f54b"}.fa-shopify:before{content:"\e057"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-shopping-cart:before{content:"\f07a"}.fa-shopware:before{content:"\f5b5"}.fa-shower:before{content:"\f2cc"}.fa-shuttle-van:before{content:"\f5b6"}.fa-s
ign:before{content:"\f4d9"}.fa-sign-in-alt:before{content:"\f2f6"}.fa-sign-language:before{content:"\f2a7"}.fa-sign-out-alt:before{content:"\f2f5"}.fa-signal:before{content:"\f012"}.fa-signature:before{content:"\f5b7"}.fa-sim-card:before{content:"\f7c4"}.fa-simplybuilt:before{content:"\f215"}.fa-sink:before{content:"\e06d"}.fa-sistrix:before{content:"\f3ee"}.fa-sitemap:before{content:"\f0e8"}.fa-sith:before{content:"\f512"}.fa-skating:before{content:"\f7c5"}.fa-sketch:before{content:"\f7c6"}.fa-skiing:before{content:"\f7c9"}.fa-skiing-nordic:before{content:"\f7ca"}.fa-skull:before{content:"\f54c"}.fa-skull-crossbones:before{content:"\f714"}.fa-skyatlas:before{content:"\f216"}.fa-skype:before{content:"\f17e"}.fa-slack:before{content:"\f198"}.fa-slack-hash:before{content:"\f3ef"}.fa-slash:before{content:"\f715"}.fa-sleigh:before{content:"\f7cc"}.fa-sliders-h:before{content:"\f1de"}.fa-slideshare:before{content:"\f1e7"}.fa-smile:before{content:"\f118"}.fa-smile-beam:before{content:"\f5b8"}.fa-smile-wink:before{content:"\f4da"}.fa-smog:before{content:"\f75f"}.fa-smoking:before{content:"\f48d"}.fa-smoking-ban:before{content:"\f54d"}.fa-sms:before{content:"\f7cd"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-snowboarding:before{content:"\f7ce"}.fa-snowflake:before{content:"\f2dc"}.fa-snowman:before{content:"\f7d0"}.fa-snowplow:before{content:"\f7d2"}.fa-soap:before{content:"\e06e"}.fa-socks:before{content:"\f696"}.fa-solar-panel:before{content:"\f5ba"}.fa-sort:before{content:"\f0dc"}.fa-sort-alpha-down:before{content:"\f15d"}.fa-sort-alpha-down-alt:before{content:"\f881"}.fa-sort-alpha-up:before{content:"\f15e"}.fa-sort-alpha-up-alt:before{content:"\f882"}.fa-sort-amount-down:before{content:"\f160"}.fa-sort-amount-down-alt:before{content:"\f884"}.fa-sort-amount-up:before{content:"\f161"}.fa-sort-amount-up-alt:before{content:"\f885"}.fa-sort-down:before{content:"\f0dd"}.fa-sort-numeric-down:before{content:"\f162"}.fa-sort-numeric-down-alt:before{content:"\f886"}.fa-sort-numeric-up:before{content:"\f163"}.fa-sort-numeric-up-alt:before{content:"\f887"}.fa-sort-up:before{content:"\f0de"}.fa-soundcloud:before{content:"\f1be"}.fa-sourcetree:before{content:"\f7d3"}.fa-spa:before{content:"\f5bb"}.fa-space-shuttle:before{content:"\f197"}.fa-speakap:before{content:"\f3f3"}.fa-speaker-deck:before{content:"\f83c"}.fa-spell-check:before{content:"\f891"}.fa-spider:before{content:"\f717"}.fa-spinner:before{content:"\f110"}.fa-splotch:before{content:"\f5bc"}.fa-spotify:before{content:"\f1bc"}.fa-spray-can:before{content:"\f5bd"}.fa-square:before{content:"\f0c8"}.fa-square-full:before{content:"\f45c"}.fa-square-root-alt:before{content:"\f698"}.fa-squarespace:before{content:"\f5be"}.fa-stack-exchange:before{content:"\f18d"}.fa-stack-overflow:before{content:"\f16c"}.fa-stackpath:before{content:"\f842"}.fa-stamp:before{content:"\f5bf"}.fa-star:before{content:"\f005"}.fa-star-and-crescent:before{content:"\f699"}.fa-star-half:before{content:"\f089"}.fa-star-half-alt:before{content:"\f5c0"}.fa-star-of-david:before{content:"\f69a"}.fa-star-of-life:before{content:"\f621"}.fa-staylinked:before{content:"\f3f5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-steam-symbol:before{content:"\f3f6"}.fa-step-backward:before{content:"\f048"}.fa-step-forward:before{content:"\f051"}.fa-stethoscope:before{content:"\f0f1"}.fa-sticker-mule:before{content:"\f3f7"}.fa-sticky-note:before{content:"\f249"}.fa-stop:before{content:"\f04
d"}.fa-stop-circle:before{content:"\f28d"}.fa-stopwatch:before{content:"\f2f2"}.fa-stopwatch-20:before{content:"\e06f"}.fa-store:before{content:"\f54e"}.fa-store-alt:before{content:"\f54f"}.fa-store-alt-slash:before{content:"\e070"}.fa-store-slash:before{content:"\e071"}.fa-strava:before{content:"\f428"}.fa-stream:before{content:"\f550"}.fa-street-view:before{content:"\f21d"}.fa-strikethrough:before{content:"\f0cc"}.fa-stripe:before{content:"\f429"}.fa-stripe-s:before{content:"\f42a"}.fa-stroopwafel:before{content:"\f551"}.fa-studiovinari:before{content:"\f3f8"}.fa-stumbleupon:before{content:"\f1a4"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-subscript:before{content:"\f12c"}.fa-subway:before{content:"\f239"}.fa-suitcase:before{content:"\f0f2"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-sun:before{content:"\f185"}.fa-superpowers:before{content:"\f2dd"}.fa-superscript:before{content:"\f12b"}.fa-supple:before{content:"\f3f9"}.fa-surprise:before{content:"\f5c2"}.fa-suse:before{content:"\f7d6"}.fa-swatchbook:before{content:"\f5c3"}.fa-swift:before{content:"\f8e1"}.fa-swimmer:before{content:"\f5c4"}.fa-swimming-pool:before{content:"\f5c5"}.fa-symfony:before{content:"\f83d"}.fa-synagogue:before{content:"\f69b"}.fa-sync:before{content:"\f021"}.fa-sync-alt:before{content:"\f2f1"}.fa-syringe:before{content:"\f48e"}.fa-table:before{content:"\f0ce"}.fa-table-tennis:before{content:"\f45d"}.fa-tablet:before{content:"\f10a"}.fa-tablet-alt:before{content:"\f3fa"}.fa-tablets:before{content:"\f490"}.fa-tachometer-alt:before{content:"\f3fd"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-tape:before{content:"\f4db"}.fa-tasks:before{content:"\f0ae"}.fa-taxi:before{content:"\f1ba"}.fa-teamspeak:before{content:"\f4f9"}.fa-teeth:before{content:"\f62e"}.fa-teeth-open:before{content:"\f62f"}.fa-telegram:before{content:"\f2c6"}.fa-telegram-plane:before{content:"\f3fe"}.fa-temperature-high:before{content:"\f769"}.fa-temperature-low:before{content:"\f76b"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-tenge:before{content:"\f7d7"}.fa-terminal:before{content:"\f120"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-th:before{content:"\f00a"}.fa-th-large:before{content:"\f009"}.fa-th-list:before{content:"\f00b"}.fa-the-red-yeti:before{content:"\f69d"}.fa-theater-masks:before{content:"\f630"}.fa-themeco:before{content:"\f5c6"}.fa-themeisle:before{content:"\f2b2"}.fa-thermometer:before{content:"\f491"}.fa-thermometer-empty:before{content:"\f2cb"}.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-think-peaks:before{content:"\f731"}.fa-thumbs-down:before{content:"\f165"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbtack:before{content:"\f08d"}.fa-ticket-alt:before{content:"\f3ff"}.fa-tiktok:before{content:"\e07b"}.fa-times:before{content:"\f00d"}.fa-times-circle:before{content:"\f057"}.fa-tint:before{content:"\f043"}.fa-tint-slash:before{content:"\f5c7"}.fa-tired:before{content:"\f5c8"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-toilet:before{content:"\f7d8"}.fa-toilet-paper:before{content:"\f71e"}.fa-toilet-paper-slash:before{content:"\e072"}.fa-toolbox:before{content:"\f552"}.fa-tools:before{content:"\f7d9"}.fa-tooth:before{content:"\f5c9"}.fa-torah:before{content:"\f6a0"}.fa-torii-gate:before{content:"\f6a1"}.fa-tractor:before{content:"\f722"}.fa-trade-federation:before{content:"\f513"}.fa
-trademark:before{content:"\f25c"}.fa-traffic-light:before{content:"\f637"}.fa-trailer:before{content:"\e041"}.fa-train:before{content:"\f238"}.fa-tram:before{content:"\f7da"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-trash:before{content:"\f1f8"}.fa-trash-alt:before{content:"\f2ed"}.fa-trash-restore:before{content:"\f829"}.fa-trash-restore-alt:before{content:"\f82a"}.fa-tree:before{content:"\f1bb"}.fa-trello:before{content:"\f181"}.fa-tripadvisor:before{content:"\f262"}.fa-trophy:before{content:"\f091"}.fa-truck:before{content:"\f0d1"}.fa-truck-loading:before{content:"\f4de"}.fa-truck-monster:before{content:"\f63b"}.fa-truck-moving:before{content:"\f4df"}.fa-truck-pickup:before{content:"\f63c"}.fa-tshirt:before{content:"\f553"}.fa-tty:before{content:"\f1e4"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-tv:before{content:"\f26c"}.fa-twitch:before{content:"\f1e8"}.fa-twitter:before{content:"\f099"}.fa-twitter-square:before{content:"\f081"}.fa-typo3:before{content:"\f42b"}.fa-uber:before{content:"\f402"}.fa-ubuntu:before{content:"\f7df"}.fa-uikit:before{content:"\f403"}.fa-umbraco:before{content:"\f8e8"}.fa-umbrella:before{content:"\f0e9"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-uncharted:before{content:"\e084"}.fa-underline:before{content:"\f0cd"}.fa-undo:before{content:"\f0e2"}.fa-undo-alt:before{content:"\f2ea"}.fa-uniregistry:before{content:"\f404"}.fa-unity:before{content:"\e049"}.fa-universal-access:before{content:"\f29a"}.fa-university:before{content:"\f19c"}.fa-unlink:before{content:"\f127"}.fa-unlock:before{content:"\f09c"}.fa-unlock-alt:before{content:"\f13e"}.fa-unsplash:before{content:"\e07c"}.fa-untappd:before{content:"\f405"}.fa-upload:before{content:"\f093"}.fa-ups:before{content:"\f7e0"}.fa-usb:before{content:"\f287"}.fa-user:before{content:"\f007"}.fa-user-alt:before{content:"\f406"}.fa-user-alt-slash:before{content:"\f4fa"}.fa-user-astronaut:before{content:"\f4fb"}.fa-user-check:before{content:"\f4fc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-clock:before{content:"\f4fd"}.fa-user-cog:before{content:"\f4fe"}.fa-user-edit:before{content:"\f4ff"}.fa-user-friends:before{content:"\f500"}.fa-user-graduate:before{content:"\f501"}.fa-user-injured:before{content:"\f728"}.fa-user-lock:before{content:"\f502"}.fa-user-md:before{content:"\f0f0"}.fa-user-minus:before{content:"\f503"}.fa-user-ninja:before{content:"\f504"}.fa-user-nurse:before{content:"\f82f"}.fa-user-plus:before{content:"\f234"}.fa-user-secret:before{content:"\f21b"}.fa-user-shield:before{content:"\f505"}.fa-user-slash:before{content:"\f506"}.fa-user-tag:before{content:"\f507"}.fa-user-tie:before{content:"\f508"}.fa-user-times:before{content:"\f235"}.fa-users:before{content:"\f0c0"}.fa-users-cog:before{content:"\f509"}.fa-users-slash:before{content:"\e073"}.fa-usps:before{content:"\f7e1"}.fa-ussunnah:before{content:"\f407"}.fa-utensil-spoon:before{content:"\f2e5"}.fa-utensils:before{content:"\f2e7"}.fa-vaadin:before{content:"\f408"}.fa-vector-square:before{content:"\f5cb"}.fa-venus:before{content:"\f221"}.fa-venus-double:before{content:"\f226"}.fa-venus-mars:before{content:"\f228"}.fa-vest:before{content:"\e085"}.fa-vest-patches:before{content:"\e086"}.fa-viacoin:before{content:"\f237"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-vial:before{content:"\f492"}.fa-vials:before{content:"\f493"}.fa-viber:before{content:"\f409"}.fa-video:before{content:"\f03d"}.fa-video-slash:before{content:"\f4e2"}.fa-vihara:
before{content:"\f6a7"}.fa-vimeo:before{content:"\f40a"}.fa-vimeo-square:before{content:"\f194"}.fa-vimeo-v:before{content:"\f27d"}.fa-vine:before{content:"\f1ca"}.fa-virus:before{content:"\e074"}.fa-virus-slash:before{content:"\e075"}.fa-viruses:before{content:"\e076"}.fa-vk:before{content:"\f189"}.fa-vnv:before{content:"\f40b"}.fa-voicemail:before{content:"\f897"}.fa-volleyball-ball:before{content:"\f45f"}.fa-volume-down:before{content:"\f027"}.fa-volume-mute:before{content:"\f6a9"}.fa-volume-off:before{content:"\f026"}.fa-volume-up:before{content:"\f028"}.fa-vote-yea:before{content:"\f772"}.fa-vr-cardboard:before{content:"\f729"}.fa-vuejs:before{content:"\f41f"}.fa-walking:before{content:"\f554"}.fa-wallet:before{content:"\f555"}.fa-warehouse:before{content:"\f494"}.fa-watchman-monitoring:before{content:"\e087"}.fa-water:before{content:"\f773"}.fa-wave-square:before{content:"\f83e"}.fa-waze:before{content:"\f83f"}.fa-weebly:before{content:"\f5cc"}.fa-weibo:before{content:"\f18a"}.fa-weight:before{content:"\f496"}.fa-weight-hanging:before{content:"\f5cd"}.fa-weixin:before{content:"\f1d7"}.fa-whatsapp:before{content:"\f232"}.fa-whatsapp-square:before{content:"\f40c"}.fa-wheelchair:before{content:"\f193"}.fa-whmcs:before{content:"\f40d"}.fa-wifi:before{content:"\f1eb"}.fa-wikipedia-w:before{content:"\f266"}.fa-wind:before{content:"\f72e"}.fa-window-close:before{content:"\f410"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-windows:before{content:"\f17a"}.fa-wine-bottle:before{content:"\f72f"}.fa-wine-glass:before{content:"\f4e3"}.fa-wine-glass-alt:before{content:"\f5ce"}.fa-wix:before{content:"\f5cf"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-wodu:before{content:"\e088"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-won-sign:before{content:"\f159"}.fa-wordpress:before{content:"\f19a"}.fa-wordpress-simple:before{content:"\f411"}.fa-wpbeginner:before{content:"\f297"}.fa-wpexplorer:before{content:"\f2de"}.fa-wpforms:before{content:"\f298"}.fa-wpressr:before{content:"\f3e4"}.fa-wrench:before{content:"\f0ad"}.fa-x-ray:before{content:"\f497"}.fa-xbox:before{content:"\f412"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-y-combinator:before{content:"\f23b"}.fa-yahoo:before{content:"\f19e"}.fa-yammer:before{content:"\f840"}.fa-yandex:before{content:"\f413"}.fa-yandex-international:before{content:"\f414"}.fa-yarn:before{content:"\f7e3"}.fa-yelp:before{content:"\f1e9"}.fa-yen-sign:before{content:"\f157"}.fa-yin-yang:before{content:"\f6ad"}.fa-yoast:before{content:"\f2b1"}.fa-youtube:before{content:"\f167"}.fa-youtube-square:before{content:"\f431"}.fa-zhihu:before{content:"\f63f"}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}@font-face{font-family:"Font Awesome 5 Brands";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-brands-400.eot);src:url(../webfonts/fa-brands-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.woff) format("woff"),url(../webfonts/fa-brands-400.ttf) format("truetype"),url(../webfonts/fa-brands-400.svg#fontawesome) format("svg")}.fab{font-family:"Font Awesome 5 Brands"}@font-face{font-family:"Font Awesome 5 
Free";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-regular-400.eot);src:url(../webfonts/fa-regular-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.woff) format("woff"),url(../webfonts/fa-regular-400.ttf) format("truetype"),url(../webfonts/fa-regular-400.svg#fontawesome) format("svg")}.fab,.far{font-weight:400}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.eot);src:url(../webfonts/fa-solid-900.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.woff) format("woff"),url(../webfonts/fa-solid-900.ttf) format("truetype"),url(../webfonts/fa-solid-900.svg#fontawesome) format("svg")}.fa,.far,.fas{font-family:"Font Awesome 5 Free"}.fa,.fas{font-weight:900} \ No newline at end of file diff --git a/spaces/Liu-LAB/GPT-academic/request_llm/bridge_chatglmonnx.py b/spaces/Liu-LAB/GPT-academic/request_llm/bridge_chatglmonnx.py deleted file mode 100644 index 594bcca15f04c7d9790da95fee2a1d51252c07d1..0000000000000000000000000000000000000000 --- a/spaces/Liu-LAB/GPT-academic/request_llm/bridge_chatglmonnx.py +++ /dev/null @@ -1,73 +0,0 @@ -model_name = "ChatGLM-ONNX" -cmd_to_install = "`pip install -r request_llm/requirements_chatglm_onnx.txt`" - - -from transformers import AutoModel, AutoTokenizer -import time -import threading -import importlib -from toolbox import update_ui, get_conf -from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM - -from .chatglmoonx import ChatGLMModel, chat_template - - - -# ------------------------------------------------------------------------------------------------------------------------ -# 🔌💻 Local Model -# ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM -class GetONNXGLMHandle(LocalLLMHandle): - - def load_model_info(self): - # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 - self.model_name = model_name - self.cmd_to_install = cmd_to_install - - def load_model_and_tokenizer(self): - # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 - import os, glob - if not len(glob.glob("./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/*.bin")) >= 7: # 该模型有七个 bin 文件 - from huggingface_hub import snapshot_download - snapshot_download(repo_id="K024/ChatGLM-6b-onnx-u8s8", local_dir="./request_llm/ChatGLM-6b-onnx-u8s8") - def create_model(): - return ChatGLMModel( - tokenizer_path = "./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/sentencepiece.model", - onnx_model_path = "./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/chatglm-6b-int8.onnx" - ) - self._model = create_model() - return self._model, None - - def llm_stream_generator(self, **kwargs): - # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 - def adaptor(kwargs): - query = kwargs['query'] - max_length = kwargs['max_length'] - top_p = kwargs['top_p'] - temperature = kwargs['temperature'] - history = kwargs['history'] - return query, max_length, top_p, temperature, history - - query, max_length, top_p, temperature, history = adaptor(kwargs) - - prompt = chat_template(history, query) - for answer in self._model.generate_iterate( - prompt, - max_generated_tokens=max_length, - top_k=1, - top_p=top_p, - temperature=temperature, - ): - yield answer - - def try_to_import_special_deps(self, **kwargs): - # import something that will raise error if the 
user does not install requirement_*.txt - # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 - pass - - -# ------------------------------------------------------------------------------------------------------------------------ -# 🔌💻 GPT-Academic Interface -# ------------------------------------------------------------------------------------------------------------------------ -predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name) \ No newline at end of file diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/kie/sdmgr/README.md b/spaces/Loren/Streamlit_OCR_comparator/configs/kie/sdmgr/README.md deleted file mode 100644 index 645696b75c76e496c394a8f6773a8fa8a0d939da..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/configs/kie/sdmgr/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# SDMGR - -> [Spatial Dual-Modality Graph Reasoning for Key Information Extraction](https://arxiv.org/abs/2103.14470) - - - -## Abstract - -Key information extraction from document images is of paramount importance in office automation. Conventional template matching based approaches fail to generalize well to document images of unseen templates, and are not robust against text recognition errors. In this paper, we propose an end-to-end Spatial Dual-Modality Graph Reasoning method (SDMG-R) to extract key information from unstructured document images. We model document images as dual-modality graphs, nodes of which encode both the visual and textual features of detected text regions, and edges of which represent the spatial relations between neighboring text regions. The key information extraction is solved by iteratively propagating messages along graph edges and reasoning the categories of graph nodes. In order to roundly evaluate our proposed method as well as boost the future research, we release a new dataset named WildReceipt, which is collected and annotated tailored for the evaluation of key information extraction from document images of unseen templates in the wild. It contains 25 key information categories, a total of about 69000 text boxes, and is about 2 times larger than the existing public datasets. Extensive experiments validate that all information including visual features, textual features and spatial relations can benefit key information extraction. It has been shown that SDMG-R can effectively extract key information from document images of unseen templates, and obtain new state-of-the-art results on the recent popular benchmark SROIE and our WildReceipt. Our code and dataset will be publicly released. - -
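To make the graph-reasoning idea in the abstract concrete (each detected text region becomes a node fusing visual and textual features, edges embed pairwise spatial relations, and messages are propagated for a few iterations before each node is classified), here is a minimal PyTorch sketch. All dimensions, the fusion and propagation rules, and the class count are illustrative assumptions; this is not the SDMG-R implementation shipped with MMOCR.

```python
import torch
import torch.nn as nn

class ToyDualModalityGraph(nn.Module):
    """Illustrative graph reasoning over detected text regions (not the real SDMG-R)."""

    def __init__(self, vis_dim=64, txt_dim=64, node_dim=128, edge_dim=32,
                 num_classes=26, num_iters=2):
        super().__init__()
        self.fuse = nn.Linear(vis_dim + txt_dim, node_dim)   # fuse visual + textual features per region
        self.edge_embed = nn.Linear(4, edge_dim)             # embed a 4-d spatial relation (dx, dy, dw, dh)
        self.message = nn.Linear(node_dim + edge_dim, node_dim)
        self.update = nn.GRUCell(node_dim, node_dim)
        self.classify = nn.Linear(node_dim, num_classes)
        self.num_iters = num_iters

    def forward(self, vis_feat, txt_feat, rel):
        # vis_feat, txt_feat: (N, D) features of N text regions; rel: (N, N, 4) pairwise spatial relations
        h = torch.relu(self.fuse(torch.cat([vis_feat, txt_feat], dim=-1)))   # (N, node_dim)
        e = torch.relu(self.edge_embed(rel))                                 # (N, N, edge_dim)
        for _ in range(self.num_iters):
            # message sent from node j to node i, conditioned on the edge (i, j)
            neighbours = h.unsqueeze(0).expand(h.size(0), -1, -1)           # (N, N, node_dim), [i, j] = h_j
            msg = torch.relu(self.message(torch.cat([neighbours, e], dim=-1)))
            h = self.update(msg.mean(dim=1), h)                             # aggregate incoming messages, update nodes
        return self.classify(h)                                             # per-node key-information logits

if __name__ == "__main__":
    n = 5
    logits = ToyDualModalityGraph()(torch.randn(n, 64), torch.randn(n, 64), torch.randn(n, n, 4))
    print(logits.shape)  # torch.Size([5, 26])
```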
      - -## Results and models - -### WildReceipt - -| Method | Modality | Macro F1-Score | Download | -| :--------------------------------------------------------------------: | :--------------: | :------------: | :--------------------------------------------------------------------------------------------------: | -| [sdmgr_unet16](/configs/kie/sdmgr/sdmgr_unet16_60e_wildreceipt.py) | Visual + Textual | 0.888 | [model](https://download.openmmlab.com/mmocr/kie/sdmgr/sdmgr_unet16_60e_wildreceipt_20210520-7489e6de.pth) \| [log](https://download.openmmlab.com/mmocr/kie/sdmgr/20210520_132236.log.json) | -| [sdmgr_novisual](/configs/kie/sdmgr/sdmgr_novisual_60e_wildreceipt.py) | Textual | 0.870 | [model](https://download.openmmlab.com/mmocr/kie/sdmgr/sdmgr_novisual_60e_wildreceipt_20210517-a44850da.pth) \| [log](https://download.openmmlab.com/mmocr/kie/sdmgr/20210517_205829.log.json) | - -```{note} -1. For `sdmgr_novisual`, images are not needed for training and testing. So fake `img_prefix` can be used in configs. As well, fake `file_name` can be used in annotation files. -``` - -### WildReceiptOpenset - -| Method | Modality | Edge F1-Score | Node Macro F1-Score | Node Micro F1-Score | Download | -| :-------------------------------------------------------------------: | :------: | :-----------: | :-----------------: | :-----------------: | :----------------------------------------------------------------------: | -| [sdmgr_novisual](/configs/kie/sdmgr/sdmgr_novisual_60e_wildreceipt_openset.py) | Textual | 0.786 | 0.926 | 0.935 | [model](https://download.openmmlab.com/mmocr/kie/sdmgr/sdmgr_novisual_60e_wildreceipt_openset_20210917-d236b3ea.pth) \| [log](https://download.openmmlab.com/mmocr/kie/sdmgr/20210917_050824.log.json) | - -```{note} -1. In the case of openset, the number of node categories is unknown or unfixed, and more node category can be added. -2. To show that our method can handle openset problem, we modify the ground truth of `WildReceipt` to `WildReceiptOpenset`. The `nodes` are just classified into 4 classes: `background, key, value, others`, while adding `edge` labels for each box. -3. The model is used to predict whether two nodes are a pair connecting by a valid edge. -4. You can learn more about the key differences between CloseSet and OpenSet annotations in our [tutorial](tutorials/kie_closeset_openset.md). -``` - -## Citation - -```bibtex -@misc{sun2021spatial, - title={Spatial Dual-Modality Graph Reasoning for Key Information Extraction}, - author={Hongbin Sun and Zhanghui Kuang and Xiaoyu Yue and Chenhao Lin and Wayne Zhang}, - year={2021}, - eprint={2103.14470}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/models/networks/encoder.py b/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/models/networks/encoder.py deleted file mode 100644 index 76acf690fd527bb9bd1dfc0c07c82573a1026d88..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/models/networks/encoder.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -import torch.nn as nn -import numpy as np -import torch.nn.functional as F -from models.networks.base_network import BaseNetwork -from models.networks.normalization import get_nonspade_norm_layer - - -class ConvEncoder(BaseNetwork): - """ Same architecture as the image discriminator """ - - def __init__(self, opt): - super().__init__() - - kw = 3 - pw = int(np.ceil((kw - 1.0) / 2)) - ndf = opt.ngf - norm_layer = get_nonspade_norm_layer(opt, opt.norm_E) - self.layer1 = norm_layer(nn.Conv2d(3, ndf, kw, stride=2, padding=pw)) - self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, kw, stride=2, padding=pw)) - self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=2, padding=pw)) - self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=2, padding=pw)) - self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw)) - if opt.crop_size >= 256: - self.layer6 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw)) - - self.so = s0 = 4 - self.fc_mu = nn.Linear(ndf * 8 * s0 * s0, 256) - self.fc_var = nn.Linear(ndf * 8 * s0 * s0, 256) - - self.actvn = nn.LeakyReLU(0.2, False) - self.opt = opt - - def forward(self, x): - if x.size(2) != 256 or x.size(3) != 256: - x = F.interpolate(x, size=(256, 256), mode="bilinear") - - x = self.layer1(x) - x = self.layer2(self.actvn(x)) - x = self.layer3(self.actvn(x)) - x = self.layer4(self.actvn(x)) - x = self.layer5(self.actvn(x)) - if self.opt.crop_size >= 256: - x = self.layer6(self.actvn(x)) - x = self.actvn(x) - - x = x.view(x.size(0), -1) - mu = self.fc_mu(x) - logvar = self.fc_var(x) - - return mu, logvar diff --git a/spaces/MCkernick/Image_Restoration_Colorization/predict.py b/spaces/MCkernick/Image_Restoration_Colorization/predict.py deleted file mode 100644 index 5573cd1a64d8357641299517338011e7e1aa1ac1..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/predict.py +++ /dev/null @@ -1,222 +0,0 @@ -import tempfile -from pathlib import Path -import argparse -import shutil -import os -import glob -import cv2 -import cog -from run import run_cmd - - -class Predictor(cog.Predictor): - def setup(self): - parser = argparse.ArgumentParser() - parser.add_argument( - "--input_folder", type=str, default="input/cog_temp", help="Test images" - ) - parser.add_argument( - "--output_folder", - type=str, - default="output", - help="Restored images, please use the absolute path", - ) - parser.add_argument("--GPU", type=str, default="0", help="0,1,2") - parser.add_argument( - "--checkpoint_name", - type=str, - default="Setting_9_epoch_100", - help="choose which checkpoint", - ) - self.opts = parser.parse_args("") - self.basepath = os.getcwd() - self.opts.input_folder = os.path.join(self.basepath, self.opts.input_folder) - self.opts.output_folder = os.path.join(self.basepath, self.opts.output_folder) - os.makedirs(self.opts.input_folder, exist_ok=True) - os.makedirs(self.opts.output_folder, exist_ok=True) - - @cog.input("image", type=Path, help="input image") - @cog.input( - "HR", - type=bool, - default=False, - help="whether the input image is high-resolution", - ) - @cog.input( - "with_scratch", - type=bool, - default=False, - help="whether the input image is scratched", - ) - def predict(self, image, HR=False, with_scratch=False): - try: - os.chdir(self.basepath) - input_path = os.path.join(self.opts.input_folder, os.path.basename(image)) - shutil.copy(str(image), input_path) - - gpu1 = self.opts.GPU - - ## Stage 1: Overall Quality Improve - print("Running Stage 1: Overall 
restoration") - os.chdir("./Global") - stage_1_input_dir = self.opts.input_folder - stage_1_output_dir = os.path.join( - self.opts.output_folder, "stage_1_restore_output" - ) - - os.makedirs(stage_1_output_dir, exist_ok=True) - - if not with_scratch: - - stage_1_command = ( - "python test.py --test_mode Full --Quality_restore --test_input " - + stage_1_input_dir - + " --outputs_dir " - + stage_1_output_dir - + " --gpu_ids " - + gpu1 - ) - run_cmd(stage_1_command) - else: - - mask_dir = os.path.join(stage_1_output_dir, "masks") - new_input = os.path.join(mask_dir, "input") - new_mask = os.path.join(mask_dir, "mask") - stage_1_command_1 = ( - "python detection.py --test_path " - + stage_1_input_dir - + " --output_dir " - + mask_dir - + " --input_size full_size" - + " --GPU " - + gpu1 - ) - - if HR: - HR_suffix = " --HR" - else: - HR_suffix = "" - - stage_1_command_2 = ( - "python test.py --Scratch_and_Quality_restore --test_input " - + new_input - + " --test_mask " - + new_mask - + " --outputs_dir " - + stage_1_output_dir - + " --gpu_ids " - + gpu1 - + HR_suffix - ) - - run_cmd(stage_1_command_1) - run_cmd(stage_1_command_2) - - ## Solve the case when there is no face in the old photo - stage_1_results = os.path.join(stage_1_output_dir, "restored_image") - stage_4_output_dir = os.path.join(self.opts.output_folder, "final_output") - os.makedirs(stage_4_output_dir, exist_ok=True) - for x in os.listdir(stage_1_results): - img_dir = os.path.join(stage_1_results, x) - shutil.copy(img_dir, stage_4_output_dir) - - print("Finish Stage 1 ...") - print("\n") - - ## Stage 2: Face Detection - - print("Running Stage 2: Face Detection") - os.chdir(".././Face_Detection") - stage_2_input_dir = os.path.join(stage_1_output_dir, "restored_image") - stage_2_output_dir = os.path.join( - self.opts.output_folder, "stage_2_detection_output" - ) - os.makedirs(stage_2_output_dir, exist_ok=True) - - stage_2_command = ( - "python detect_all_dlib_HR.py --url " - + stage_2_input_dir - + " --save_url " - + stage_2_output_dir - ) - - run_cmd(stage_2_command) - print("Finish Stage 2 ...") - print("\n") - - ## Stage 3: Face Restore - print("Running Stage 3: Face Enhancement") - os.chdir(".././Face_Enhancement") - stage_3_input_mask = "./" - stage_3_input_face = stage_2_output_dir - stage_3_output_dir = os.path.join( - self.opts.output_folder, "stage_3_face_output" - ) - - os.makedirs(stage_3_output_dir, exist_ok=True) - - self.opts.checkpoint_name = "FaceSR_512" - stage_3_command = ( - "python test_face.py --old_face_folder " - + stage_3_input_face - + " --old_face_label_folder " - + stage_3_input_mask - + " --tensorboard_log --name " - + self.opts.checkpoint_name - + " --gpu_ids " - + gpu1 - + " --load_size 512 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 1 --results_dir " - + stage_3_output_dir - + " --no_parsing_map" - ) - - run_cmd(stage_3_command) - print("Finish Stage 3 ...") - print("\n") - - ## Stage 4: Warp back - print("Running Stage 4: Blending") - os.chdir(".././Face_Detection") - stage_4_input_image_dir = os.path.join(stage_1_output_dir, "restored_image") - stage_4_input_face_dir = os.path.join(stage_3_output_dir, "each_img") - stage_4_output_dir = os.path.join(self.opts.output_folder, "final_output") - os.makedirs(stage_4_output_dir, exist_ok=True) - - stage_4_command = ( - "python align_warp_back_multiple_dlib_HR.py --origin_url " - + stage_4_input_image_dir - + " --replace_url " - + stage_4_input_face_dir - + " --save_url " - + stage_4_output_dir - ) - - run_cmd(stage_4_command) - 
print("Finish Stage 4 ...") - print("\n") - - print("All the processing is done. Please check the results.") - - final_output = os.listdir(os.path.join(self.opts.output_folder, "final_output"))[0] - - image_restore = cv2.imread(os.path.join(self.opts.output_folder, "final_output", final_output)) - - out_path = Path(tempfile.mkdtemp()) / "out.png" - - cv2.imwrite(str(out_path), image_restore) - finally: - clean_folder(self.opts.input_folder) - clean_folder(self.opts.output_folder) - return out_path - - -def clean_folder(folder): - for filename in os.listdir(folder): - file_path = os.path.join(folder, filename) - try: - if os.path.isfile(file_path) or os.path.islink(file_path): - os.unlink(file_path) - elif os.path.isdir(file_path): - shutil.rmtree(file_path) - except Exception as e: - print(f"Failed to delete {file_path}. Reason:{e}") diff --git a/spaces/MMMMQZ/MQZGPT/run_Windows.bat b/spaces/MMMMQZ/MQZGPT/run_Windows.bat deleted file mode 100644 index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000 --- a/spaces/MMMMQZ/MQZGPT/run_Windows.bat +++ /dev/null @@ -1,5 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" diff --git a/spaces/MMars/Question_Answering_DistilBert_Finetuned_on_SQuAD/app.py b/spaces/MMars/Question_Answering_DistilBert_Finetuned_on_SQuAD/app.py deleted file mode 100644 index a6b0dba0a4a6968ff4e58a3a41bd7a29bec770f4..0000000000000000000000000000000000000000 --- a/spaces/MMars/Question_Answering_DistilBert_Finetuned_on_SQuAD/app.py +++ /dev/null @@ -1,14 +0,0 @@ -import gradio as gr - -title = "Question_Answering_DistilBert_Finetuned_on_SQuAD_Demo" - -context = """ -The Amazon rainforest (Portuguese: Floresta Amazônica or Amazônia; Spanish: Selva Amazónica, Amazonía or usually Amazonia; French: Forêt amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species. -""" - -question = "Which name is also used to describe the Amazon rainforest in English?" 
- -gr.Interface.load("huggingface/MMars/distilbert-base-uncased-finetuned-squad", - title=title, - inputs=[gr.Textbox(label="Context", lines=5, value=context), - gr.Textbox(label="Question", value=question)]).launch() \ No newline at end of file diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/image_encoder.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/image_encoder.py deleted file mode 100644 index a6ad9ad2938842308e482a05c9d35ab08db9b2c3..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/image_encoder.py +++ /dev/null @@ -1,395 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from typing import Optional, Tuple, Type - -from .common import LayerNorm2d, MLPBlock - - -# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa -class ImageEncoderViT(nn.Module): - def __init__( - self, - img_size: int = 1024, - patch_size: int = 16, - in_chans: int = 3, - embed_dim: int = 768, - depth: int = 12, - num_heads: int = 12, - mlp_ratio: float = 4.0, - out_chans: int = 256, - qkv_bias: bool = True, - norm_layer: Type[nn.Module] = nn.LayerNorm, - act_layer: Type[nn.Module] = nn.GELU, - use_abs_pos: bool = True, - use_rel_pos: bool = False, - rel_pos_zero_init: bool = True, - window_size: int = 0, - global_attn_indexes: Tuple[int, ...] = (), - ) -> None: - """ - Args: - img_size (int): Input image size. - patch_size (int): Patch size. - in_chans (int): Number of input image channels. - embed_dim (int): Patch embedding dimension. - depth (int): Depth of ViT. - num_heads (int): Number of attention heads in each ViT block. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool): If True, add a learnable bias to query, key, value. - norm_layer (nn.Module): Normalization layer. - act_layer (nn.Module): Activation layer. - use_abs_pos (bool): If True, use absolute positional embeddings. - use_rel_pos (bool): If True, add relative positional embeddings to the attention map. - rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. - window_size (int): Window size for window attention blocks. - global_attn_indexes (list): Indexes for blocks using global attention. - """ - super().__init__() - self.img_size = img_size - - self.patch_embed = PatchEmbed( - kernel_size=(patch_size, patch_size), - stride=(patch_size, patch_size), - in_chans=in_chans, - embed_dim=embed_dim, - ) - - self.pos_embed: Optional[nn.Parameter] = None - if use_abs_pos: - # Initialize absolute positional embedding with pretrain image size. 
- self.pos_embed = nn.Parameter( - torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim) - ) - - self.blocks = nn.ModuleList() - for i in range(depth): - block = Block( - dim=embed_dim, - num_heads=num_heads, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - norm_layer=norm_layer, - act_layer=act_layer, - use_rel_pos=use_rel_pos, - rel_pos_zero_init=rel_pos_zero_init, - window_size=window_size if i not in global_attn_indexes else 0, - input_size=(img_size // patch_size, img_size // patch_size), - ) - self.blocks.append(block) - - self.neck = nn.Sequential( - nn.Conv2d( - embed_dim, - out_chans, - kernel_size=1, - bias=False, - ), - LayerNorm2d(out_chans), - nn.Conv2d( - out_chans, - out_chans, - kernel_size=3, - padding=1, - bias=False, - ), - LayerNorm2d(out_chans), - ) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.patch_embed(x) - if self.pos_embed is not None: - x = x + self.pos_embed - - for blk in self.blocks: - x = blk(x) - - x = self.neck(x.permute(0, 3, 1, 2)) - - return x - - -class Block(nn.Module): - """Transformer blocks with support of window attention and residual propagation blocks""" - - def __init__( - self, - dim: int, - num_heads: int, - mlp_ratio: float = 4.0, - qkv_bias: bool = True, - norm_layer: Type[nn.Module] = nn.LayerNorm, - act_layer: Type[nn.Module] = nn.GELU, - use_rel_pos: bool = False, - rel_pos_zero_init: bool = True, - window_size: int = 0, - input_size: Optional[Tuple[int, int]] = None, - ) -> None: - """ - Args: - dim (int): Number of input channels. - num_heads (int): Number of attention heads in each ViT block. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool): If True, add a learnable bias to query, key, value. - norm_layer (nn.Module): Normalization layer. - act_layer (nn.Module): Activation layer. - use_rel_pos (bool): If True, add relative positional embeddings to the attention map. - rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. - window_size (int): Window size for window attention blocks. If it equals 0, then - use global attention. - input_size (int or None): Input resolution for calculating the relative positional - parameter size. - """ - super().__init__() - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, - num_heads=num_heads, - qkv_bias=qkv_bias, - use_rel_pos=use_rel_pos, - rel_pos_zero_init=rel_pos_zero_init, - input_size=input_size if window_size == 0 else (window_size, window_size), - ) - - self.norm2 = norm_layer(dim) - self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer) - - self.window_size = window_size - - def forward(self, x: torch.Tensor) -> torch.Tensor: - shortcut = x - x = self.norm1(x) - # Window partition - if self.window_size > 0: - H, W = x.shape[1], x.shape[2] - x, pad_hw = window_partition(x, self.window_size) - - x = self.attn(x) - # Reverse window partition - if self.window_size > 0: - x = window_unpartition(x, self.window_size, pad_hw, (H, W)) - - x = shortcut + x - x = x + self.mlp(self.norm2(x)) - - return x - - -class Attention(nn.Module): - """Multi-head Attention block with relative position embeddings.""" - - def __init__( - self, - dim: int, - num_heads: int = 8, - qkv_bias: bool = True, - use_rel_pos: bool = False, - rel_pos_zero_init: bool = True, - input_size: Optional[Tuple[int, int]] = None, - ) -> None: - """ - Args: - dim (int): Number of input channels. - num_heads (int): Number of attention heads. 
- qkv_bias (bool: If True, add a learnable bias to query, key, value. - rel_pos (bool): If True, add relative positional embeddings to the attention map. - rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. - input_size (int or None): Input resolution for calculating the relative positional - parameter size. - """ - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = head_dim**-0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.proj = nn.Linear(dim, dim) - - self.use_rel_pos = use_rel_pos - if self.use_rel_pos: - assert ( - input_size is not None - ), "Input size must be provided if using relative positional encoding." - # initialize relative positional embeddings - self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) - self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - B, H, W, _ = x.shape - # qkv with shape (3, B, nHead, H * W, C) - qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) - # q, k, v with shape (B * nHead, H * W, C) - q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) - - attn = (q * self.scale) @ k.transpose(-2, -1) - - if self.use_rel_pos: - attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) - - attn = attn.softmax(dim=-1) - x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) - x = self.proj(x) - - return x - - -def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: - """ - Partition into non-overlapping windows with padding if needed. - Args: - x (tensor): input tokens with [B, H, W, C]. - window_size (int): window size. - - Returns: - windows: windows after partition with [B * num_windows, window_size, window_size, C]. - (Hp, Wp): padded height and width before partition - """ - B, H, W, C = x.shape - - pad_h = (window_size - H % window_size) % window_size - pad_w = (window_size - W % window_size) % window_size - if pad_h > 0 or pad_w > 0: - x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) - Hp, Wp = H + pad_h, W + pad_w - - x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows, (Hp, Wp) - - -def window_unpartition( - windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int] -) -> torch.Tensor: - """ - Window unpartition into original sequences and removing padding. - Args: - x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. - window_size (int): window size. - pad_hw (Tuple): padded height and width (Hp, Wp). - hw (Tuple): original height and width (H, W) before padding. - - Returns: - x: unpartitioned sequences with [B, H, W, C]. - """ - Hp, Wp = pad_hw - H, W = hw - B = windows.shape[0] // (Hp * Wp // window_size // window_size) - x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) - - if Hp > H or Wp > W: - x = x[:, :H, :W, :].contiguous() - return x - - -def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: - """ - Get relative positional embeddings according to the relative positions of - query and key sizes. - Args: - q_size (int): size of query q. - k_size (int): size of key k. 
- rel_pos (Tensor): relative position embeddings (L, C). - - Returns: - Extracted positional embeddings according to relative positions. - """ - max_rel_dist = int(2 * max(q_size, k_size) - 1) - # Interpolate rel pos if needed. - if rel_pos.shape[0] != max_rel_dist: - # Interpolate rel pos. - rel_pos_resized = F.interpolate( - rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), - size=max_rel_dist, - mode="linear", - ) - rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) - else: - rel_pos_resized = rel_pos - - # Scale the coords with short length if shapes for q and k are different. - q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) - k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) - relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) - - return rel_pos_resized[relative_coords.long()] - - -def add_decomposed_rel_pos( - attn: torch.Tensor, - q: torch.Tensor, - rel_pos_h: torch.Tensor, - rel_pos_w: torch.Tensor, - q_size: Tuple[int, int], - k_size: Tuple[int, int], -) -> torch.Tensor: - """ - Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. - https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 - Args: - attn (Tensor): attention map. - q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). - rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. - rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. - q_size (Tuple): spatial sequence size of query q with (q_h, q_w). - k_size (Tuple): spatial sequence size of key k with (k_h, k_w). - - Returns: - attn (Tensor): attention map with added relative positional embeddings. - """ - q_h, q_w = q_size - k_h, k_w = k_size - Rh = get_rel_pos(q_h, k_h, rel_pos_h) - Rw = get_rel_pos(q_w, k_w, rel_pos_w) - - B, _, dim = q.shape - r_q = q.reshape(B, q_h, q_w, dim) - rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) - rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) - - attn = ( - attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] - ).view(B, q_h * q_w, k_h * k_w) - - return attn - - -class PatchEmbed(nn.Module): - """ - Image to Patch Embedding. - """ - - def __init__( - self, - kernel_size: Tuple[int, int] = (16, 16), - stride: Tuple[int, int] = (16, 16), - padding: Tuple[int, int] = (0, 0), - in_chans: int = 3, - embed_dim: int = 768, - ) -> None: - """ - Args: - kernel_size (Tuple): kernel size of the projection layer. - stride (Tuple): stride of the projection layer. - padding (Tuple): padding size of the projection layer. - in_chans (int): Number of input image channels. - embed_dim (int): embed_dim (int): Patch embedding dimension. 
- """ - super().__init__() - - self.proj = nn.Conv2d( - in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding - ) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.proj(x) - # B C H W -> B H W C - x = x.permute(0, 2, 3, 1) - return x diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/mask_decoder.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/mask_decoder.py deleted file mode 100644 index 3e86f7cc9ad95582a08ef2531c68d03fa4af8d99..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/mask_decoder.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from torch import nn -from torch.nn import functional as F - -from typing import List, Tuple, Type - -from .common import LayerNorm2d - - -class MaskDecoder(nn.Module): - def __init__( - self, - *, - transformer_dim: int, - transformer: nn.Module, - num_multimask_outputs: int = 3, - activation: Type[nn.Module] = nn.GELU, - iou_head_depth: int = 3, - iou_head_hidden_dim: int = 256, - ) -> None: - """ - Predicts masks given an image and prompt embeddings, using a - tranformer architecture. - - Arguments: - transformer_dim (int): the channel dimension of the transformer - transformer (nn.Module): the transformer used to predict masks - num_multimask_outputs (int): the number of masks to predict - when disambiguating masks - activation (nn.Module): the type of activation to use when - upscaling masks - iou_head_depth (int): the depth of the MLP used to predict - mask quality - iou_head_hidden_dim (int): the hidden dimension of the MLP - used to predict mask quality - """ - super().__init__() - self.transformer_dim = transformer_dim - self.transformer = transformer - - self.num_multimask_outputs = num_multimask_outputs - - self.iou_token = nn.Embedding(1, transformer_dim) - self.num_mask_tokens = num_multimask_outputs + 1 - self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) - - self.output_upscaling = nn.Sequential( - nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), - LayerNorm2d(transformer_dim // 4), - activation(), - nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), - activation(), - ) - self.output_hypernetworks_mlps = nn.ModuleList( - [ - MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) - for i in range(self.num_mask_tokens) - ] - ) - - self.iou_prediction_head = MLP( - transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth - ) - - def forward( - self, - image_embeddings: torch.Tensor, - image_pe: torch.Tensor, - sparse_prompt_embeddings: torch.Tensor, - dense_prompt_embeddings: torch.Tensor, - multimask_output: bool, - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Predict masks given image and prompt embeddings. 
- - Arguments: - image_embeddings (torch.Tensor): the embeddings from the image encoder - image_pe (torch.Tensor): positional encoding with the shape of image_embeddings - sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes - dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs - multimask_output (bool): Whether to return multiple masks or a single - mask. - - Returns: - torch.Tensor: batched predicted masks - torch.Tensor: batched predictions of mask quality - """ - masks, iou_pred = self.predict_masks( - image_embeddings=image_embeddings, - image_pe=image_pe, - sparse_prompt_embeddings=sparse_prompt_embeddings, - dense_prompt_embeddings=dense_prompt_embeddings, - ) - - # Select the correct mask or masks for outptu - if multimask_output: - mask_slice = slice(1, None) - else: - mask_slice = slice(0, 1) - masks = masks[:, mask_slice, :, :] - iou_pred = iou_pred[:, mask_slice] - - # Prepare output - return masks, iou_pred - - def predict_masks( - self, - image_embeddings: torch.Tensor, - image_pe: torch.Tensor, - sparse_prompt_embeddings: torch.Tensor, - dense_prompt_embeddings: torch.Tensor, - ) -> Tuple[torch.Tensor, torch.Tensor]: - """Predicts masks. See 'forward' for more details.""" - # Concatenate output tokens - output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) - output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1) - tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) - - # Expand per-image data in batch direction to be per-mask - src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) - src = src + dense_prompt_embeddings - pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) - b, c, h, w = src.shape - - # Run the transformer - hs, src = self.transformer(src, pos_src, tokens) - iou_token_out = hs[:, 0, :] - mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :] - - # Upscale mask embeddings and predict masks using the mask tokens - src = src.transpose(1, 2).view(b, c, h, w) - upscaled_embedding = self.output_upscaling(src) - hyper_in_list: List[torch.Tensor] = [] - for i in range(self.num_mask_tokens): - hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])) - hyper_in = torch.stack(hyper_in_list, dim=1) - b, c, h, w = upscaled_embedding.shape - masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) - - # Generate mask quality predictions - iou_pred = self.iou_prediction_head(iou_token_out) - - return masks, iou_pred - - -# Lightly adapted from -# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa -class MLP(nn.Module): - def __init__( - self, - input_dim: int, - hidden_dim: int, - output_dim: int, - num_layers: int, - sigmoid_output: bool = False, - ) -> None: - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - self.sigmoid_output = sigmoid_output - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - if self.sigmoid_output: - x = F.sigmoid(x) - return x diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/fbrs/inference/transforms/flip.py 
b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/fbrs/inference/transforms/flip.py deleted file mode 100644 index c1543cb65f8d3892054dc96f39a8196987fb6bfd..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/fbrs/inference/transforms/flip.py +++ /dev/null @@ -1,37 +0,0 @@ -import torch - -from ..clicker import Click -from .base import BaseTransform - - -class AddHorizontalFlip(BaseTransform): - def transform(self, image_nd, clicks_lists): - assert len(image_nd.shape) == 4 - image_nd = torch.cat([image_nd, torch.flip(image_nd, dims=[3])], dim=0) - - image_width = image_nd.shape[3] - clicks_lists_flipped = [] - for clicks_list in clicks_lists: - clicks_list_flipped = [Click(is_positive=click.is_positive, - coords=(click.coords[0], image_width - click.coords[1] - 1)) - for click in clicks_list] - clicks_lists_flipped.append(clicks_list_flipped) - clicks_lists = clicks_lists + clicks_lists_flipped - - return image_nd, clicks_lists - - def inv_transform(self, prob_map): - assert len(prob_map.shape) == 4 and prob_map.shape[0] % 2 == 0 - num_maps = prob_map.shape[0] // 2 - prob_map, prob_map_flipped = prob_map[:num_maps], prob_map[num_maps:] - - return 0.5 * (prob_map + torch.flip(prob_map_flipped, dims=[3])) - - def get_state(self): - return None - - def set_state(self, state): - pass - - def reset(self): - pass diff --git a/spaces/Matthijs/speecht5-asr-demo/app.py b/spaces/Matthijs/speecht5-asr-demo/app.py deleted file mode 100644 index 9ceb96705998b3e999307364e7bb3b85aff5ff6e..0000000000000000000000000000000000000000 --- a/spaces/Matthijs/speecht5-asr-demo/app.py +++ /dev/null @@ -1,116 +0,0 @@ -import gradio as gr -import librosa -import torch - -from transformers import SpeechT5Processor, SpeechT5ForSpeechToText - - -checkpoint = "microsoft/speecht5_asr" -processor = SpeechT5Processor.from_pretrained(checkpoint) -model = SpeechT5ForSpeechToText.from_pretrained(checkpoint) - - -def process_audio(sampling_rate, waveform): - # convert from int16 to floating point - waveform = waveform / 32678.0 - - # convert to mono if stereo - if len(waveform.shape) > 1: - waveform = librosa.to_mono(waveform.T) - - # resample to 16 kHz if necessary - if sampling_rate != 16000: - waveform = librosa.resample(waveform, orig_sr=sampling_rate, target_sr=16000) - - # limit to 30 seconds - waveform = waveform[:16000*30] - - # make PyTorch tensor - waveform = torch.tensor(waveform) - return waveform - - -def predict(audio, mic_audio=None): - # audio = tuple (sample_rate, frames) or (sample_rate, (frames, channels)) - if mic_audio is not None: - sampling_rate, waveform = mic_audio - elif audio is not None: - sampling_rate, waveform = audio - else: - return "(please provide audio)" - - waveform = process_audio(sampling_rate, waveform) - inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt") - predicted_ids = model.generate(**inputs, max_length=400) - transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) - return transcription[0] - - -title = "SpeechT5: Automatic Speech Recognition" - -description = """ -The SpeechT5 model is pre-trained on text as well as speech inputs, with targets that are also a mix of text and speech. -By pre-training on text and speech at the same time, it learns unified representations for both, resulting in improved modeling capabilities. 
- -SpeechT5 can be fine-tuned for different speech tasks. This space demonstrates the speech-to-text -or automatic speech recognition (ASR) checkpoint for the English language. - -See also the text-to-speech (TTS) demo -and the voice conversion demo. - -How to use: Upload an audio file or record using the microphone. The audio is converted to mono and resampled to 16 kHz before -being passed into the model. The output is the text transcription of the audio. SpeechT5 uses a simple character-based tokenizer, with no -additional language model on top, so the output won't have punctuation or capitalization and may contain the occasional spelling error. -""" - -article = """ -
-References: SpeechT5 paper | original GitHub | original weights
-
-@article{Ao2021SpeechT5,
-  title   = {SpeechT5: Unified-Modal Encoder-Decoder Pre-training for Spoken Language Processing},
-  author  = {Junyi Ao and Rui Wang and Long Zhou and Chengyi Wang and Shuo Ren and Yu Wu and Shujie Liu and Tom Ko and Qing Li and Yu Zhang and Zhihua Wei and Yao Qian and Jinyu Li and Furu Wei},
-  eprint={2110.07205},
-  archivePrefix={arXiv},
-  primaryClass={eess.AS},
-  year={2021}
-}
-
-Example sound credits:
-
-• "Hmm, I don't know" from InspectorJ (CC BY 4.0 license)
-• "Henry V" excerpt from acclivity (CC BY-NC 4.0 license)
-• "You can see it in the eyes" from JoyOhJoy (CC0 license)
-• "We yearn for time" from Sample_Me (CC0 license)
-
      -""" - -examples = [ - ["examples/hmm_i_dont_know.wav", None], - ["examples/henry5.mp3", None], - ["examples/yearn_for_time.mp3", None], - ["examples/see_in_eyes.wav", None], -] - -gr.Interface( - fn=predict, - inputs=[ - gr.Audio(label="Upload Speech", source="upload", type="numpy"), - gr.Audio(label="Record Speech", source="microphone", type="numpy"), - ], - outputs=[ - gr.Text(label="Transcription"), - ], - title=title, - description=description, - article=article, - examples=examples, -).launch() diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/__main__.py b/spaces/MetaWabbit/Auto-GPT/autogpt/__main__.py deleted file mode 100644 index 128f9eea4900429e88276abdde3419b806001ac7..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/autogpt/__main__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Auto-GPT: A GPT powered AI Assistant""" -import autogpt.cli - -if __name__ == "__main__": - autogpt.cli.main() diff --git a/spaces/MetaWabbit/Auto-GPT/tests/test_prompt_generator.py b/spaces/MetaWabbit/Auto-GPT/tests/test_prompt_generator.py deleted file mode 100644 index 6a0bfd6c7bbdbfaa3750e9dee621bd25e17a448b..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/tests/test_prompt_generator.py +++ /dev/null @@ -1,114 +0,0 @@ -from unittest import TestCase - -from autogpt.promptgenerator import PromptGenerator - - -class TestPromptGenerator(TestCase): - """ - Test cases for the PromptGenerator class, which is responsible for generating - prompts for the AI with constraints, commands, resources, and performance evaluations. - """ - - @classmethod - def setUpClass(cls): - """ - Set up the initial state for each test method by creating an instance of PromptGenerator. - """ - cls.generator = PromptGenerator() - - # Test whether the add_constraint() method adds a constraint to the generator's constraints list - def test_add_constraint(self): - """ - Test if the add_constraint() method adds a constraint to the generator's constraints list. - """ - constraint = "Constraint1" - self.generator.add_constraint(constraint) - self.assertIn(constraint, self.generator.constraints) - - # Test whether the add_command() method adds a command to the generator's commands list - def test_add_command(self): - """ - Test if the add_command() method adds a command to the generator's commands list. - """ - command_label = "Command Label" - command_name = "command_name" - args = {"arg1": "value1", "arg2": "value2"} - self.generator.add_command(command_label, command_name, args) - command = { - "label": command_label, - "name": command_name, - "args": args, - } - self.assertIn(command, self.generator.commands) - - def test_add_resource(self): - """ - Test if the add_resource() method adds a resource to the generator's resources list. - """ - resource = "Resource1" - self.generator.add_resource(resource) - self.assertIn(resource, self.generator.resources) - - def test_add_performance_evaluation(self): - """ - Test if the add_performance_evaluation() method adds an evaluation to the generator's - performance_evaluation list. - """ - evaluation = "Evaluation1" - self.generator.add_performance_evaluation(evaluation) - self.assertIn(evaluation, self.generator.performance_evaluation) - - def test_generate_prompt_string(self): - """ - Test if the generate_prompt_string() method generates a prompt string with all the added - constraints, commands, resources, and evaluations. 
- """ - # Define the test data - constraints = ["Constraint1", "Constraint2"] - commands = [ - { - "label": "Command1", - "name": "command_name1", - "args": {"arg1": "value1"}, - }, - { - "label": "Command2", - "name": "command_name2", - "args": {}, - }, - ] - resources = ["Resource1", "Resource2"] - evaluations = ["Evaluation1", "Evaluation2"] - - # Add test data to the generator - for constraint in constraints: - self.generator.add_constraint(constraint) - for command in commands: - self.generator.add_command( - command["label"], command["name"], command["args"] - ) - for resource in resources: - self.generator.add_resource(resource) - for evaluation in evaluations: - self.generator.add_performance_evaluation(evaluation) - - # Generate the prompt string and verify its correctness - prompt_string = self.generator.generate_prompt_string() - self.assertIsNotNone(prompt_string) - - # Check if all constraints, commands, resources, and evaluations are present in the prompt string - for constraint in constraints: - self.assertIn(constraint, prompt_string) - for command in commands: - self.assertIn(command["name"], prompt_string) - for key, value in command["args"].items(): - self.assertIn(f'"{key}": "{value}"', prompt_string) - for resource in resources: - self.assertIn(resource, prompt_string) - for evaluation in evaluations: - self.assertIn(evaluation, prompt_string) - - self.assertIn("constraints", prompt_string.lower()) - self.assertIn("commands", prompt_string.lower()) - self.assertIn("resources", prompt_string.lower()) - self.assertIn("performance evaluation", prompt_string.lower()) diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/satrn/README.md b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/satrn/README.md deleted file mode 100644 index f0421c9e04e4ea4719cf953ed5871808685cb907..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/satrn/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# SATRN - -> [On Recognizing Texts of Arbitrary Shapes with 2D Self-Attention](https://arxiv.org/abs/1910.04396) - - - -## Abstract - -Scene text recognition (STR) is the task of recognizing character sequences in natural scenes. While there have been great advances in STR methods, current methods still fail to recognize texts in arbitrary shapes, such as heavily curved or rotated texts, which are abundant in daily life (e.g. restaurant signs, product labels, company logos, etc). This paper introduces a novel architecture to recognizing texts of arbitrary shapes, named Self-Attention Text Recognition Network (SATRN), which is inspired by the Transformer. SATRN utilizes the self-attention mechanism to describe two-dimensional (2D) spatial dependencies of characters in a scene text image. Exploiting the full-graph propagation of self-attention, SATRN can recognize texts with arbitrary arrangements and large inter-character spacing. As a result, SATRN outperforms existing STR models by a large margin of 5.7 pp on average in "irregular text" benchmarks. We provide empirical analyses that illustrate the inner mechanisms and the extent to which the model is applicable (e.g. rotated and multi-line text). We will open-source the code. - -
      - -## Dataset - -### Train Dataset - -| trainset | instance_num | repeat_num | source | -| :-------: | :----------: | :--------: | :----: | -| SynthText | 7266686 | 1 | synth | -| Syn90k | 8919273 | 1 | synth | - -### Test Dataset - -| testset | instance_num | type | -| :-----: | :----------: | :-------: | -| IIIT5K | 3000 | regular | -| SVT | 647 | regular | -| IC13 | 1015 | regular | -| IC15 | 2077 | irregular | -| SVTP | 645 | irregular | -| CT80 | 288 | irregular | - -## Results and Models - -| Methods | | Regular Text | | | | Irregular Text | | download | -| :--------------------------------------------------------------------: | :----: | :----------: | :-------: | :-: | :-------: | :------------: | :----: | :---------------------------------------------------------------------: | -| | IIIT5K | SVT | IC13-1015 | | IC15-2077 | SVTP | CT80 | | -| [Satrn](/configs/textrecog/satrn/satrn_shallow_5e_st_mj.py) | 0.9600 | 0.9181 | 0.9606 | | 0.8045 | 0.8837 | 0.8993 | [model](https://download.openmmlab.com/mmocr/textrecog/satrn/satrn_shallow_5e_st_mj/satrn_shallow_5e_st_mj_20220915_152443-5fd04a4c.pth) \| [log](https://download.openmmlab.com/mmocr/textrecog/satrn/satrn_shallow_5e_st_mj/20220915_152443.log) | -| [Satrn-TTA](/configs/textrecog/satrn/satrn_shallow_5e_st_mj.py) | 0.9530 | 0.9181 | 0.9527 | | 0.8276 | 0.8884 | 0.9028 | | -| [Satrn_small](/configs/textrecog/satrn/satrn_shallow-small_5e_st_mj.py) | 0.9423 | 0.9011 | 0.9567 | | 0.7886 | 0.8574 | 0.8472 | [model](https://download.openmmlab.com/mmocr/textrecog/satrn/satrn_shallow-small_5e_st_mj/satrn_shallow-small_5e_st_mj_20220915_152442-5591bf27.pth) \| [log](https://download.openmmlab.com/mmocr/textrecog/satrn/satrn_shallow-small_5e_st_mj/20220915_152442.log) | -| [Satrn_small-TTA](/configs/textrecog/satrn/satrn_shallow-small_5e_st_mj.py) | 0.9380 | 0.8995 | 0.9488 | | 0.8122 | 0.8620 | 0.8507 | | - -## Citation - -```bibtex -@article{junyeop2019recognizing, - title={On Recognizing Texts of Arbitrary Shapes with 2D Self-Attention}, - author={Junyeop Lee, Sungrae Park, Jeonghun Baek, Seong Joon Oh, Seonghyeon Kim, Hwalsuk Lee}, - year={2019} -} -``` diff --git a/spaces/MuGeminorum/insecta/khandy/split_utils.py b/spaces/MuGeminorum/insecta/khandy/split_utils.py deleted file mode 100644 index a524e817d865a30f99c540e0cc52be230b7ad469..0000000000000000000000000000000000000000 --- a/spaces/MuGeminorum/insecta/khandy/split_utils.py +++ /dev/null @@ -1,71 +0,0 @@ -import numbers -from collections.abc import Sequence - -import numpy as np - - -def split_by_num(x, num_splits, strict=True): - """ - Args: - num_splits: an integer indicating the number of splits - - References: - numpy.split and numpy.array_split - """ - # NB: np.ndarray is not Sequence - assert isinstance(x, (Sequence, np.ndarray)) - assert isinstance(num_splits, numbers.Integral) - - if strict: - assert len(x) % num_splits == 0 - split_size = (len(x) + num_splits - 1) // num_splits - out_list = [] - for i in range(0, len(x), split_size): - out_list.append(x[i: i + split_size]) - return out_list - - -def split_by_size(x, sizes): - """ - References: - tf.split - https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/misc.py - """ - # NB: np.ndarray is not Sequence - assert isinstance(x, (Sequence, np.ndarray)) - assert isinstance(sizes, (list, tuple)) - - assert sum(sizes) == len(x) - out_list = [] - start_index = 0 - for size in sizes: - out_list.append(x[start_index: start_index + size]) - start_index += size - return out_list - - -def split_by_slice(x, slices): - """ 
- References: - SliceLayer in Caffe, and numpy.split - """ - # NB: np.ndarray is not Sequence - assert isinstance(x, (Sequence, np.ndarray)) - assert isinstance(slices, (list, tuple)) - - out_list = [] - indices = [0] + list(slices) + [len(x)] - for i in range(len(slices) + 1): - out_list.append(x[indices[i]: indices[i + 1]]) - return out_list - - -def split_by_ratio(x, ratios): - # NB: np.ndarray is not Sequence - assert isinstance(x, (Sequence, np.ndarray)) - assert isinstance(ratios, (list, tuple)) - - pdf = [k / sum(ratios) for k in ratios] - cdf = [sum(pdf[:k]) for k in range(len(pdf) + 1)] - indices = [int(round(len(x) * k)) for k in cdf] - return [x[indices[i]: indices[i + 1]] for i in range(len(ratios))] diff --git a/spaces/NCTCMumbai/NCTC/models/official/utils/testing/scripts/builds_common.sh b/spaces/NCTCMumbai/NCTC/models/official/utils/testing/scripts/builds_common.sh deleted file mode 100644 index 3cf08bb510d2a8ba0b06b1d38ccd1294b159ce15..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/utils/testing/scripts/builds_common.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# Common Bash functions used by build scripts - -COLOR_NC='\033[0m' -COLOR_BOLD='\033[1m' -COLOR_LIGHT_GRAY='\033[0;37m' -COLOR_GREEN='\033[0;32m' -COLOR_RED='\033[0;31m' - -die() { - # Print a message and exit with code 1. - # - # Usage: die - # e.g., die "Something bad happened." - - echo $@ - exit 1 -} - -num_cpus() { - # Get the number of CPUs - N_CPUS=$(grep -c ^processor /proc/cpuinfo) - if [[ -z ${N_CPUS} ]]; then - die "ERROR: Unable to determine the number of CPUs" - fi - - echo ${N_CPUS} -} - -# List files changed (i.e., added, or revised) from -# the common ancestor of HEAD and the latest master branch. -# Usage: get_changed_files_from_master_branch -get_changed_files_from_master_branch() { - ANCESTOR=$(git merge-base HEAD master origin/master) - git diff ${ANCESTOR} --diff-filter=d --name-only "$@" -} - -# List python files changed that still exist, -# i.e., not removed. -# Usage: get_py_files_to_check [--incremental] -get_py_files_to_check() { - if [[ "$1" == "--incremental" ]]; then - get_changed_files_from_master_branch -- '*.py' - elif [[ -z "$1" ]]; then - find official/ -name '*.py' - else - die "Found unsupported args: $@ for get_py_files_to_check." 
- fi -} diff --git a/spaces/Namit2111/id_verfiy/face_extract.py b/spaces/Namit2111/id_verfiy/face_extract.py deleted file mode 100644 index 780475813c76d705a1516028b0eb3e04caa5c714..0000000000000000000000000000000000000000 --- a/spaces/Namit2111/id_verfiy/face_extract.py +++ /dev/null @@ -1,31 +0,0 @@ -import cv2 -import sys - - - - -def extract(face): - imagePath = face - - image = cv2.imread(imagePath) - gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - - faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml") - faces = faceCascade.detectMultiScale( - gray, - scaleFactor=1.3, - minNeighbors=3, - minSize=(30, 30) - ) - - # print("[INFO] Found {0} Faces.".format(len(faces))) - - for (x, y, w, h) in faces: - cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2) - roi_color = image[y:y + h, x:x + w] - - name = str(w)+str(h)+'_faces.jpg' - cv2.imwrite(str(w) + str(h) + '_faces.jpg', roi_color) - return name -# status = cv2.imwrite('faces_detected.jpg', image) -# print("[INFO] Image faces_detected.jpg written to filesystem: ", status) \ No newline at end of file diff --git a/spaces/NbAiLab/whisper-norwegian-small/app.py b/spaces/NbAiLab/whisper-norwegian-small/app.py deleted file mode 100644 index 0b831ad481d02520215df9bbad8534d0a3811f6a..0000000000000000000000000000000000000000 --- a/spaces/NbAiLab/whisper-norwegian-small/app.py +++ /dev/null @@ -1,97 +0,0 @@ -import torch - -import gradio as gr -import pytube as pt -from transformers import pipeline -from huggingface_hub import model_info - -MODEL_NAME = "NbAiLab/whisper-norwegian-small-test" #this always needs to stay in line 8 :D sorry for the hackiness -lang = "no" - -device = 0 if torch.cuda.is_available() else "cpu" -pipe = pipeline( - task="automatic-speech-recognition", - model=MODEL_NAME, - chunk_length_s=30, - device=device, -) - -pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe") - -def transcribe(microphone, file_upload): - warn_output = "" - if (microphone is not None) and (file_upload is not None): - warn_output = ( - "WARNING: You've uploaded an audio file and used the microphone. " - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - ) - - elif (microphone is None) and (file_upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - file = microphone if microphone is not None else file_upload - - text = pipe(file)["text"] - - return warn_output + text - - -def _return_yt_html_embed(yt_url): - video_id = yt_url.split("?v=")[-1] - HTML_str = ( - f'
      ' - "
      " - ) - return HTML_str - - -def yt_transcribe(yt_url): - yt = pt.YouTube(yt_url) - html_embed_str = _return_yt_html_embed(yt_url) - stream = yt.streams.filter(only_audio=True)[0] - stream.download(filename="audio.mp3") - - text = pipe("audio.mp3")["text"] - - return html_embed_str, text - - -demo = gr.Blocks() - -mf_transcribe = gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type="filepath", optional=True), - gr.inputs.Audio(source="upload", type="filepath", optional=True), - ], - outputs="text", - layout="horizontal", - theme="huggingface", - title="Norwegian Whisper Small Demo: Transcribe Audio", - description=( - "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the the fine-tuned" - f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files" - " of arbitrary length." - ), - allow_flagging="never", -) - -yt_transcribe = gr.Interface( - fn=yt_transcribe, - inputs=[gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")], - outputs=["html", "text"], - layout="horizontal", - theme="huggingface", - title="Norwegian Whisper Small Demo: Transcribe YouTube", - description=( - "Transcribe long-form YouTube videos with the click of a button! Demo uses the the fine-tuned checkpoint:" - f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files of" - " arbitrary length." - ), - allow_flagging="never", -) - -with demo: - gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"]) - -demo.launch(enable_queue=True) diff --git a/spaces/NechkaP/arxiv-streamlit-lab/app.py b/spaces/NechkaP/arxiv-streamlit-lab/app.py deleted file mode 100644 index 64c63062991f36d0384568ab1ba4dc61bb0a74a0..0000000000000000000000000000000000000000 --- a/spaces/NechkaP/arxiv-streamlit-lab/app.py +++ /dev/null @@ -1,224 +0,0 @@ -import streamlit as st - -import warnings -warnings.simplefilter('ignore') -import numpy as np -import pandas as pd -from tqdm import tqdm -from sklearn import metrics -import transformers -import torch -import json -import pandas as pd -from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler -from sklearn.model_selection import train_test_split -from transformers import DistilBertTokenizer, DistilBertModel -import logging -logging.basicConfig(level=logging.ERROR) -from torch import cuda -device = 'cuda' if cuda.is_available() else 'cpu' - - -st.markdown("## arXiv classificator") -# st.markdown("", unsafe_allow_html=True) -st.markdown("Please type the article's title and abstract below") - -title = st.text_input("Title") -abstract = st.text_input("Abstract") - - -def is_good(tag: str) -> bool: - return "stat." in tag\ - or "cs." in tag\ - or "math." in tag\ - or "ph." in tag\ - or "fin." in tag\ - or "bio." in tag\ - or "eess." in tag\ - or "econ." 
in tag - - -def get_all_tags(tag_str: str): - tag_json = tag_str.replace("'", '"').replace("None", '"None"') - return [elem["term"] for elem in json.loads(tag_json) if is_good(elem["term"])] - -def join_title_and_summary(row) -> str: - return row["title"].replace("\n", " ") + " " + row["summary"].replace("\n", " ") - -class MultiLabelDataset(Dataset): - def __init__(self, dataframe, tokenizer, max_len): - self.tokenizer = tokenizer - self.data = dataframe - self.text = self.data["Text"] - self.targets = self.data["Labels"] - self.max_len = max_len - - def __len__(self): - return len(self.text) - - def __getitem__(self, index): - text = str(self.text[index]) - text = " ".join(text.split()) - - inputs = self.tokenizer.encode_plus( - text, - truncation=True, - add_special_tokens=True, - max_length=self.max_len, - pad_to_max_length=True, - return_token_type_ids=True - ) - ids = inputs['input_ids'] - mask = inputs['attention_mask'] - token_type_ids = inputs["token_type_ids"] - - return { - 'ids': torch.tensor(ids, dtype=torch.long), - 'mask': torch.tensor(mask, dtype=torch.long), - 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long), - 'targets': torch.tensor(self.targets[index], dtype=torch.float) - } - -class DistilBERTClass(torch.nn.Module): - def __init__(self): - super(DistilBERTClass, self).__init__() - self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased") - self.pre_classifier = torch.nn.Linear(768, 768) - self.dropout = torch.nn.Dropout(0.1) - self.classifier = torch.nn.Linear(768, 124) - - def forward(self, input_ids, attention_mask, token_type_ids): - output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask) - hidden_state = output_1[0] - pooler = hidden_state[:, 0] - pooler = self.pre_classifier(pooler) - pooler = torch.nn.Tanh()(pooler) - pooler = self.dropout(pooler) - output = self.classifier(pooler) - return output - -def loss_fn(outputs, targets): - return torch.nn.BCEWithLogitsLoss()(outputs, targets) - -@st.cache -def prepare_model(): - with open("./arxivData.json", 'r') as fp: - data = json.load(fp) - data = pd.DataFrame(data) - data.drop(['id', "month", "author", "day", "year", "link"], inplace=True, axis=1) - labels = data["tag"].map(get_all_tags) - good_tags = set() - for tags in labels: - for tag in tags: - good_tags.add(tag) - enum_tags = dict() - enum_tags_reverse = [None for _ in range(len(good_tags))] - for idx, tag in enumerate(good_tags): - enum_tags[tag] = idx - enum_tags_reverse[idx] = tag - def map_tags_to_target_vector(tags): - target_vector = [0.0] * len(enum_tags) - for tag in tags: - idx = enum_tags[tag] - target_vector[idx] = 1.0 / len(tags) - - assert np.allclose(np.sum(target_vector), 1.0, 0.000001) - return target_vector - - vectors = labels.map(map_tags_to_target_vector) - texts = data.apply(join_title_and_summary, axis=1) - preprocessed_data = pd.DataFrame({ - "Labels": vectors, - "Text": texts - }) - MAX_LEN = 512 - TRAIN_BATCH_SIZE = 4 - VALID_BATCH_SIZE = 4 - EPOCHS = 1 - LEARNING_RATE = 1e-05 - tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased', truncation=True, -to_lower_case=True) - - train_data, test_data = train_test_split(preprocessed_data, train_size=0.8) - train_data.reset_index(drop=True, inplace=True) - test_data.reset_index(drop=True, inplace=True) - - training_set = MultiLabelDataset(train_data, tokenizer, MAX_LEN) - testing_set = MultiLabelDataset(test_data, tokenizer, MAX_LEN) - - train_params = {'batch_size': TRAIN_BATCH_SIZE, - 'shuffle': True, - 'num_workers': 0 - } - - 
test_params = {'batch_size': VALID_BATCH_SIZE, - 'shuffle': True, - 'num_workers': 0 - } - - training_loader = DataLoader(training_set, **train_params) - testing_loader = DataLoader(testing_set, **test_params) - - model = DistilBERTClass() - model.to(device) - - optimizer = torch.optim.Adam(params = model.parameters(), lr=LEARNING_RATE) - - def train(epoch): - model.train() - for _, data in tqdm(enumerate(training_loader, 0)): - ids = data['ids'].to(device, dtype = torch.long) - mask = data['mask'].to(device, dtype = torch.long) - token_type_ids = data['token_type_ids'].to(device, dtype = torch.long) - targets = data['targets'].to(device, dtype = torch.float) - outputs = model(ids, mask, token_type_ids) - - optimizer.zero_grad() - loss = loss_fn(outputs, targets) - if _ % 100==0: - print(f'Epoch: {epoch}, Loss: {loss.item()}') - - loss.backward() - optimizer.step() - for epoch in range(EPOCHS): - train(epoch) - - def predict(text, abstract): - text += " " + abstract - text = " ".join(text.split()) - - inputs = tokenizer.encode_plus( - text, - truncation=True, - add_special_tokens=True, - max_length=MAX_LEN, - pad_to_max_length=True, - return_token_type_ids=True - ) - - ids = torch.tensor(inputs['input_ids'], dtype=torch.long).to(device, dtype = torch.long) - mask = torch.tensor(inputs['attention_mask'], dtype=torch.long).to(device, dtype = torch.long) - token_type_ids = torch.tensor(inputs["token_type_ids"], dtype=torch.long).to(device, dtype = torch.long) - - with torch.no_grad(): - logits = model(ids, attention_mask=mask, token_type_ids=token_type_ids) - - argmax = logits.cpu().detach().numpy().argmax() - return enum_tags_reverse[argmax] - - return predict - -predict_function = prepare_model() - -try: - raw_predictions = predict_function(title, abstract) - st.markdown(f"The most likely arXiv tag for this article is:")# {raw_predictions}") - if raw_predictions: - for item in raw_predictions: - st.markdown(f"* {item}") - elif (title or abstract): - st.markdown("* cs.CV") - else: - st.markdown("Oops... your input is empty") -except: - st.markdown("Oops... 
something went wrong") \ No newline at end of file diff --git a/spaces/NeilRokad/dreambooth-training/train_dreambooth.py b/spaces/NeilRokad/dreambooth-training/train_dreambooth.py deleted file mode 100644 index f4ff135e549f0d6c72f733092f3df817cb178e01..0000000000000000000000000000000000000000 --- a/spaces/NeilRokad/dreambooth-training/train_dreambooth.py +++ /dev/null @@ -1,889 +0,0 @@ -import argparse -import itertools -import math -import os -from pathlib import Path -from typing import Optional -import subprocess -import sys -import gc -import random - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from torch.utils.data import Dataset - -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import set_seed -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.utils.import_utils import is_xformers_available -from diffusers.optimization import get_scheduler -from huggingface_hub import HfFolder, Repository, whoami -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer - - -logger = get_logger(__name__) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - #required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - #required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - #required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default="", - help="The prompt to specify images in the same class as provided instance images.", - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior preservation loss. If not have enough images, additional images will be" - " sampled with class_prompt." 
- ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" - ) - parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument( - "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-6, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." - ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. 
Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - - parser.add_argument( - "--save_n_steps", - type=int, - default=1, - help=("Save the model every n global_steps"), - ) - - - parser.add_argument( - "--save_starting_step", - type=int, - default=1, - help=("The step from which it starts saving intermediary checkpoints"), - ) - - parser.add_argument( - "--stop_text_encoder_training", - type=int, - default=1000000, - help=("The step at which the text_encoder is no longer trained"), - ) - - - parser.add_argument( - "--image_captions_filename", - action="store_true", - help="Get captions from filename", - ) - - - parser.add_argument( - "--dump_only_text_encoder", - action="store_true", - default=False, - help="Dump only text encoder", - ) - - parser.add_argument( - "--train_only_unet", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--cache_latents", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--Session_dir", - type=str, - default="", - help="Current session directory", - ) - - - - - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - #if args.instance_data_dir is None: - # raise ValueError("You must specify a train data directory.") - - #if args.with_prior_preservation: - # if args.class_data_dir is None: - # raise ValueError("You must specify a data directory for class images.") - # if args.class_prompt is None: - # raise ValueError("You must specify prompt for class images.") - - return args - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. 
- """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - args, - class_data_root=None, - class_prompt=None, - size=512, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - self.image_captions_filename = None - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exists.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if args.image_captions_filename: - self.image_captions_filename = True - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - random.shuffle(self.class_images_path) - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - path = self.instance_images_path[index % self.num_instance_images] - instance_image = Image.open(path) - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - - instance_prompt = self.instance_prompt - - if self.image_captions_filename: - filename = Path(path).stem - pt=''.join([i for i in filename if not i.isdigit()]) - pt=pt.replace("_"," ") - pt=pt.replace("(","") - pt=pt.replace(")","") - pt=pt.replace("-","") - instance_prompt = pt - sys.stdout.write(" " +instance_prompt+" ") - sys.stdout.flush() - - - example["instance_images"] = self.image_transforms(instance_image) - example["instance_prompt_ids"] = self.tokenizer( - instance_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - if self.class_data_root: - class_image = Image.open(self.class_images_path[index % self.num_class_images]) - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - example["class_prompt_ids"] = self.tokenizer( - self.class_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - return example - - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
- - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - -class LatentsDataset(Dataset): - def __init__(self, latents_cache, text_encoder_cache): - self.latents_cache = latents_cache - self.text_encoder_cache = text_encoder_cache - - def __len__(self): - return len(self.latents_cache) - - def __getitem__(self, index): - return self.latents_cache[index], self.text_encoder_cache[index] - -def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): - if token is None: - token = HfFolder.get_token() - if organization is None: - username = whoami(token)["name"] - return f"{username}/{model_id}" - else: - return f"{organization}/{model_id}" - -def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict: - """ - Starts from base starting dict and then adds the remaining key values from updater replacing the values from - the first starting/base dict with the second updater dict. - - For later: how does d = {**d1, **d2} replace collision? - - :param starting_dict: - :param updater_dict: - :return: - """ - new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict - new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict - return new_dict - -def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace: - """ - - ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x - :param args1: - :param args2: - :return: - """ - # - the merged args - # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}. - merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2)) - args = argparse.Namespace(**merged_key_values_for_namespace) - return args - -def run_training(args_imported): - args_default = parse_args() - args = merge_args(args_default, args_imported) - print(args) - logging_dir = Path(args.output_dir, args.logging_dir) - i=args.save_starting_step - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with="tensorboard", - logging_dir=logging_dir, - ) - - # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate - # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. - # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. - if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: - raise ValueError( - "Gradient accumulation is not supported when training the text encoder in distributed training. " - "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." 
- ) - - if args.seed is not None: - set_seed(args.seed) - - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, torch_dtype=torch_dtype - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - - for example in tqdm( - sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process - ): - with torch.autocast("cuda"): - images = pipeline(example["prompt"]).images - - for i, image in enumerate(images): - image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg") - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - - with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: - if "step_*" not in gitignore: - gitignore.write("step_*\n") - if "epoch_*" not in gitignore: - gitignore.write("epoch_*\n") - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") - - # Load models and create wrapper for stable diffusion - if args.train_only_unet: - if os.path.exists(str(args.output_dir+"/text_encoder_trained")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained") - elif os.path.exists(str(args.output_dir+"/text_encoder")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") - unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") - if is_xformers_available(): - try: - print("Enabling memory efficient attention with xformers...") - unet.enable_xformers_memory_efficient_attention() - except Exception as e: - logger.warning( - f"Could not enable memory efficient attention. 
Make sure xformers is installed correctly and a GPU is available: {e}" - ) - vae.requires_grad_(False) - if not args.train_text_encoder: - text_encoder.requires_grad_(False) - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - if args.train_text_encoder: - text_encoder.gradient_checkpointing_enable() - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - params_to_optimize = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() - ) - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler") - - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - instance_prompt=args.instance_prompt, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_prompt=args.class_prompt, - tokenizer=tokenizer, - size=args.resolution, - center_crop=args.center_crop, - args=args, - ) - - def collate_fn(examples): - input_ids = [example["instance_prompt_ids"] for example in examples] - pixel_values = [example["instance_images"] for example in examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. - if args.with_prior_preservation: - input_ids += [example["class_prompt_ids"] for example in examples] - pixel_values += [example["class_images"] for example in examples] - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids - - batch = { - "input_ids": input_ids, - "pixel_values": pixel_values, - } - return batch - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn - ) - - # Scheduler and math around the number of training steps. 
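# Illustrative worked example (not part of the original train_dreambooth.py, numbers are
# hypothetical, not the script's defaults) of the step bookkeeping computed just below:
import math
num_batches = 50                      # e.g. 200 instance images with train_batch_size=4
gradient_accumulation_steps = 2
num_update_steps_per_epoch = math.ceil(num_batches / gradient_accumulation_steps)  # 25
num_train_epochs = 1
max_train_steps = num_train_epochs * num_update_steps_per_epoch  # 25 optimizer steps when --max_train_steps is unset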
- overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - ) - - if args.train_text_encoder: - unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, text_encoder, optimizer, train_dataloader, lr_scheduler - ) - else: - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - weight_dtype = torch.float32 - if args.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif args.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move text_encode and vae to gpu. - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. - vae.to(accelerator.device, dtype=weight_dtype) - if not args.train_text_encoder: - text_encoder.to(accelerator.device, dtype=weight_dtype) - - - if args.cache_latents: - latents_cache = [] - text_encoder_cache = [] - for batch in tqdm(train_dataloader, desc="Caching latents"): - with torch.no_grad(): - batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype) - batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True) - latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist) - if args.train_text_encoder: - text_encoder_cache.append(batch["input_ids"]) - else: - text_encoder_cache.append(text_encoder(batch["input_ids"])[0]) - train_dataset = LatentsDataset(latents_cache, text_encoder_cache) - train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True) - - del vae - #if not args.train_text_encoder: - # del text_encoder - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("dreambooth", config=vars(args)) - - def bar(prg): - br='|'+'█' * prg + ' ' * (25-prg)+'|' - return br - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) - global_step = 0 - - for epoch in range(args.num_train_epochs): - unet.train() - if args.train_text_encoder: - text_encoder.train() - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(unet): - # Convert images to latent space - with torch.no_grad(): - if args.cache_latents: - latents_dist = batch[0][0] - else: - latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist - latents = latents_dist.sample() * 0.18215 - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - if(args.cache_latents): - if args.train_text_encoder: - encoder_hidden_states = text_encoder(batch[0][1])[0] - else: - encoder_hidden_states = batch[0][1] - else: - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - # Predict the noise residual - model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - if args.with_prior_preservation: - # Chunk the noise and model_pred into two parts and compute the loss on each part separately. - model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) - target, target_prior = torch.chunk(target, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() - - # Compute prior loss - prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. 
- loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) - if args.train_text_encoder - else unet.parameters() - ) - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - fll=round((global_step*100)/args.max_train_steps) - fll=round(fll/4) - pr=bar(fll) - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - progress_bar.set_description_str("Progress:"+pr) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30: - if accelerator.is_main_process: - print(" " +" Freezing the text_encoder ..."+" ") - frz_dir=args.output_dir + "/text_encoder_frozen" - if os.path.exists(frz_dir): - subprocess.call('rm -r '+ frz_dir, shell=True) - os.mkdir(frz_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(frz_dir) - - if args.save_n_steps >= 200: - if global_step < args.max_train_steps and global_step+1==i: - ckpt_name = "_step_" + str(global_step+1) - save_dir = Path(args.output_dir+ckpt_name) - save_dir=str(save_dir) - save_dir=save_dir.replace(" ", "_") - if not os.path.exists(save_dir): - os.mkdir(save_dir) - inst=save_dir[16:] - inst=inst.replace(" ", "_") - print(" SAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt") - # Create the pipeline using the trained modules and save it. - if accelerator.is_main_process: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(save_dir) - frz_dir=args.output_dir + "/text_encoder_frozen" - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True) - subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True) - chkpth=args.Session_dir+"/"+inst+".ckpt" - subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True) - subprocess.call('rm -r '+ save_dir, shell=True) - i=i+args.save_n_steps - - accelerator.wait_for_everyone() - - # Create the pipeline using using the trained modules and save it. 
- if accelerator.is_main_process: - if args.dump_only_text_encoder: - txt_dir=args.output_dir + "/text_encoder_trained" - if not os.path.exists(txt_dir): - os.mkdir(txt_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(txt_dir) - - elif args.train_only_unet: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(args.output_dir) - txt_dir=args.output_dir + "/text_encoder_trained" - subprocess.call('rm -r '+txt_dir, shell=True) - - else: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - frz_dir=args.output_dir + "/text_encoder_frozen" - pipeline.save_pretrained(args.output_dir) - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True) - subprocess.call('rm -r '+ frz_dir, shell=True) - - if args.push_to_hub: - repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) - - accelerator.end_training() - del pipeline - torch.cuda.empty_cache() - gc.collect() -if __name__ == "__main__": - pass - #main() - diff --git a/spaces/Ninjagolover69/text_generator1/README.md b/spaces/Ninjagolover69/text_generator1/README.md deleted file mode 100644 index 0114fe0b8319c43e207b532377b72d98ef892950..0000000000000000000000000000000000000000 --- a/spaces/Ninjagolover69/text_generator1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text Generator1 -emoji: 📉 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OAOA/DifFace/basicsr/archs/rrdbnet_arch.py b/spaces/OAOA/DifFace/basicsr/archs/rrdbnet_arch.py deleted file mode 100644 index 63d07080c2ec1305090c59b7bfbbda2b003b18e4..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/archs/rrdbnet_arch.py +++ /dev/null @@ -1,119 +0,0 @@ -import torch -from torch import nn as nn -from torch.nn import functional as F - -from basicsr.utils.registry import ARCH_REGISTRY -from .arch_util import default_init_weights, make_layer, pixel_unshuffle - - -class ResidualDenseBlock(nn.Module): - """Residual Dense Block. - - Used in RRDB block in ESRGAN. - - Args: - num_feat (int): Channel number of intermediate features. - num_grow_ch (int): Channels for each growth. 
- """ - - def __init__(self, num_feat=64, num_grow_ch=32): - super(ResidualDenseBlock, self).__init__() - self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1) - self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1) - self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1) - self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1) - self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1) - - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - # initialization - default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1) - - def forward(self, x): - x1 = self.lrelu(self.conv1(x)) - x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) - x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) - x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) - x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) - # Empirically, we use 0.2 to scale the residual for better performance - return x5 * 0.2 + x - - -class RRDB(nn.Module): - """Residual in Residual Dense Block. - - Used in RRDB-Net in ESRGAN. - - Args: - num_feat (int): Channel number of intermediate features. - num_grow_ch (int): Channels for each growth. - """ - - def __init__(self, num_feat, num_grow_ch=32): - super(RRDB, self).__init__() - self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch) - self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch) - self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch) - - def forward(self, x): - out = self.rdb1(x) - out = self.rdb2(out) - out = self.rdb3(out) - # Empirically, we use 0.2 to scale the residual for better performance - return out * 0.2 + x - - -@ARCH_REGISTRY.register() -class RRDBNet(nn.Module): - """Networks consisting of Residual in Residual Dense Block, which is used - in ESRGAN. - - ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks. - - We extend ESRGAN for scale x2 and scale x1. - Note: This is one option for scale 1, scale 2 in RRDBNet. - We first employ the pixel-unshuffle (an inverse operation of pixelshuffle to reduce the spatial size - and enlarge the channel size before feeding inputs into the main ESRGAN architecture. - - Args: - num_in_ch (int): Channel number of inputs. - num_out_ch (int): Channel number of outputs. - num_feat (int): Channel number of intermediate features. - Default: 64 - num_block (int): Block number in the trunk network. Defaults: 23 - num_grow_ch (int): Channels for each growth. Default: 32. 
- """ - - def __init__(self, num_in_ch, num_out_ch, scale=4, num_feat=64, num_block=23, num_grow_ch=32): - super(RRDBNet, self).__init__() - self.scale = scale - if scale == 2: - num_in_ch = num_in_ch * 4 - elif scale == 1: - num_in_ch = num_in_ch * 16 - self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1) - self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch) - self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - # upsample - self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - def forward(self, x): - if self.scale == 2: - feat = pixel_unshuffle(x, scale=2) - elif self.scale == 1: - feat = pixel_unshuffle(x, scale=4) - else: - feat = x - feat = self.conv_first(feat) - body_feat = self.conv_body(self.body(feat)) - feat = feat + body_feat - # upsample - feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest'))) - feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest'))) - out = self.conv_last(self.lrelu(self.conv_hr(feat))) - return out diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/hubert/simple_kmeans/dump_w2v2_feature.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/hubert/simple_kmeans/dump_w2v2_feature.py deleted file mode 100644 index a1f0d902acf0756580a1f4604feee8fc499a9a63..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/hubert/simple_kmeans/dump_w2v2_feature.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -import os -import sys - -import fairseq -import soundfile as sf -import torch -import torch.nn.functional as F - -from feature_utils import get_path_iterator, dump_feature - - -logging.basicConfig( - format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=os.environ.get("LOGLEVEL", "INFO").upper(), - stream=sys.stdout, -) -logger = logging.getLogger("dump_w2v2_feature") - - -class Wav2Vec2FeatureReader(object): - def __init__(self, ckpt_path, layer, max_chunk=1600000): - ( - model, - cfg, - task, - ) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path]) - self.model = model[0].eval().cuda() - self.task = task - self.layer = layer # assume this is 1-based like HuBERT - self.max_chunk = max_chunk - logger.info(f"TASK CONFIG:\n{self.task.cfg}") - logger.info(f" max_chunk = {self.max_chunk}") - logger.info(f" model:\n{self.model}") - - def read_audio(self, path, ref_len=None): - wav, sr = sf.read(path) - assert sr == self.task.cfg.sample_rate, sr - if wav.ndim == 2: - wav = wav.mean(-1) - assert wav.ndim == 1, wav.ndim - if ref_len is not None and abs(ref_len - len(wav)) > 160: - logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") - return wav - - def get_feats(self, path, ref_len=None): - x = self.read_audio(path, ref_len) - with torch.no_grad(): - x = torch.from_numpy(x).float().cuda() - if self.task.cfg.normalize: - x = F.layer_norm(x, x.shape) - x = x.view(1, -1) - - feat = [] - for start in range(0, x.size(1), self.max_chunk): - x_chunk = x[:, start: start + self.max_chunk] - res = self.model.extract_features( - source=x_chunk, - padding_mask=None, - mask=False, - layer=self.layer - 1, - ) - feat_chunk = res["x"] - feat.append(feat_chunk) - return torch.cat(feat, 1).squeeze(0) - - -def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk): - reader = Wav2Vec2FeatureReader(ckpt_path, layer, max_chunk) - generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) - dump_feature(reader, generator, num, split, nshard, rank, feat_dir) - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("tsv_dir") - parser.add_argument("split") - parser.add_argument("ckpt_path") - parser.add_argument("layer", type=int) - parser.add_argument("nshard", type=int) - parser.add_argument("rank", type=int) - parser.add_argument("feat_dir") - parser.add_argument("--max_chunk", type=int, default=1600000) - args = parser.parse_args() - logger.info(args) - - main(**vars(args)) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/simultaneous_translation/tests/test_text_models.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/simultaneous_translation/tests/test_text_models.py deleted file mode 100644 index 127adfa6337333ba5ae598fcd158956def0d520f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/simultaneous_translation/tests/test_text_models.py +++ /dev/null @@ -1,407 +0,0 @@ -import argparse -import unittest -from typing import Any, Dict - -import torch -from examples.simultaneous_translation.models import ( - transformer_monotonic_attention -) - - -from tests.test_roberta import FakeTask - - -DEFAULT_CONFIG = { - "attention_eps": 1e-6, - "mass_preservation": True, - "noise_type": "flat", - "noise_mean": 0.0, - "noise_var": 1.0, - "energy_bias_init": -2, - "energy_bias": True -} - - -PAD_INDEX = 1 - - -def generate_config(overrides_kv): - new_dict = {key: value for 
key, value in DEFAULT_CONFIG.items()} - for key, value in overrides_kv.items(): - new_dict[key] = value - return new_dict - - -def make_sample_with_padding(longer_src=False) -> Dict[str, Any]: - tokens_1 = torch.LongTensor( - [ - [2, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15, 2], - [ - 2, 11, 12, 14, 15, 10, 11, 12, 13, 14, 15, 2, - PAD_INDEX, PAD_INDEX - ], - ] - ) - tokens_2 = torch.LongTensor( - [ - [2, 11, 12, 13, 14, 2, PAD_INDEX, PAD_INDEX], - [2, 11, 22, 33, 2, PAD_INDEX, PAD_INDEX, PAD_INDEX] - ] - ) - if longer_src: - src_tokens = tokens_1[:, 1:] - prev_output_tokens = tokens_2 - else: - src_tokens = tokens_2[:, 1:8] - prev_output_tokens = tokens_1 - - src_lengths = src_tokens.ne(PAD_INDEX).sum(dim=1).long() - - sample = { - "net_input": { - "src_tokens": src_tokens, - "prev_output_tokens": prev_output_tokens, - "src_lengths": src_lengths, - }, - "target": prev_output_tokens[:, 1:], - } - return sample - - -def build_transformer_monotonic_attention(**extra_args: Any): - overrides = { - # Use characteristics dimensions - "encoder_embed_dim": 12, - "encoder_ffn_embed_dim": 14, - "decoder_embed_dim": 12, - "decoder_ffn_embed_dim": 14, - # Disable dropout so we have comparable tests. - "dropout": 0, - "attention_dropout": 0, - "activation_dropout": 0, - "encoder_layerdrop": 0, - } - overrides.update(extra_args) - # Overrides the defaults from the parser - args = argparse.Namespace(**overrides) - transformer_monotonic_attention.monotonic_tiny_architecture(args) - - torch.manual_seed(0) - task = FakeTask(args) - return ( - transformer_monotonic_attention - .TransformerModelSimulTrans - .build_model(args, task) - ) - - -def expected_alignment_formula( - p_choose, - mass_perservation=True, - padding_mask=None -): - # Online and Linear-Time Attention by Enforcing Monotonic Alignments - # https://arxiv.org/pdf/1704.00784.pdf - # Eq 18, 19 - bsz, tgt_len, src_len = p_choose.size() - alpha = torch.zeros_like(p_choose) - - if padding_mask is not None: - bsz_pad = padding_mask.size(0) - num_heads = int(bsz / bsz_pad) - padding_mask = ( - padding_mask - .unsqueeze(1) - .expand([bsz_pad, num_heads, src_len]) - .contiguous() - .view(-1, src_len) - ) - - p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0) - - for bsz_i in range(bsz): - for i in range(tgt_len): - for j in range(src_len): - if i == 0: - if j == 0: - # First source token - alpha[bsz_i, i, j] = p_choose[bsz_i, i, j] - else: - # First target token - alpha[bsz_i, i, j] = ( - p_choose[bsz_i, i, j] - * torch.prod( - 1 - p_choose[bsz_i, i, :j] - ) - ) - else: - alpha[bsz_i, i, j] = alpha[bsz_i, i - 1, j] - for k in range(j): - alpha[bsz_i, i, j] += ( - alpha[bsz_i, i - 1, k] - * torch.prod( - 1 - p_choose[bsz_i, i, k:j] - ) - ) - alpha[bsz_i, i, j] *= p_choose[bsz_i, i, j] - - alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0) - - if mass_perservation: - alpha = mass_perservation_formula(alpha, False, padding_mask) - - return alpha - - -def mass_perservation_formula(alpha, left_padding=False, padding_mask=None): - if padding_mask is None or alpha.size(-1) == 1: - if alpha.size(-1) > 1: - alpha[:, :, -1] = 1 - alpha[:, :, :-1].sum(dim=-1) - return alpha - - src_lens = (padding_mask.logical_not()).sum(dim=1).long() - - bsz, tgt_len, src_len = alpha.size() - - assert ( - not left_padding - or (left_padding and (not padding_mask[:, 0].any())) - ) - - alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0) - - for bsz_i in range(bsz): - if left_padding: - alpha[bsz_i, :, -1] = ( - 1 - alpha[bsz_i, :, :-1].sum(dim=-1) - ) - 
else: - alpha[bsz_i, :, src_lens[bsz_i] - 1] = ( - 1 - alpha[bsz_i, :, :src_lens[bsz_i] - 1].sum(dim=-1) - ) - - return alpha - - -def expected_soft_attention_formula( - alpha, - soft_energy, - padding_mask=None, - chunksize=1e10, -): - # Monotonic Infinite Lookback Attention for Simultaneous Machine Translation - # https://arxiv.org/pdf/1906.05218.pdf - # Eq 14 - - # Monotonic Chunkwise Attention - # https://arxiv.org/abs/1712.05382 - # Eq 17 - bsz, tgt_len, src_len = alpha.size() - beta = torch.zeros_like(alpha) - - if padding_mask is not None: - bsz_pad = padding_mask.size(0) - num_heads = int(bsz / bsz_pad) - # Expanding for potential head dimension - padding_mask = ( - padding_mask - .unsqueeze(1) - .expand([bsz_pad, num_heads, src_len]) - .contiguous() - .view(-1, src_len) - ) - soft_energy = soft_energy.masked_fill(padding_mask.unsqueeze(1), float('-inf')) - - for bsz_i in range(bsz): - for i in range(tgt_len): - for j in range(src_len): - for k in range(j, min([src_len, j + chunksize])): - if not padding_mask[bsz_i, j]: - beta[bsz_i, i, j] += ( - alpha[bsz_i, i, k] * torch.exp(soft_energy[bsz_i, i, j]) - / torch.sum(torch.exp(soft_energy[bsz_i, i, max([0, k - chunksize + 1]):k + 1])) - ) - return beta - - -class MonotonicAttentionTestAbstractClass(object): - def test_forward(self): - sample = make_sample_with_padding() - out, _ = self.model.forward(**sample["net_input"]) - loss = out.sum() - loss.backward() - - def test_p_choose(self): - sample = make_sample_with_padding() - _, extra_out = self.model.forward(**sample["net_input"]) - for item in extra_out.attn_list: - p_choose = item["p_choose"] - self.assertTrue(p_choose.le(1.0).all()) - self.assertTrue(p_choose.ge(0.0).all()) - - def test_expected_alignment(self): - for longer_src in [True, False]: - sample = make_sample_with_padding(longer_src) - _, extra_out = self.model.forward(**sample["net_input"]) - for item in extra_out.attn_list: - p_choose = item["p_choose"] - alpha_system = item["alpha"] - self.assertTrue(p_choose.size() == alpha_system.size()) - bsz, num_head, tgt_len, src_len = alpha_system.size() - alpha_system = alpha_system.view(-1, tgt_len, src_len) - p_choose = p_choose.view(-1, tgt_len, src_len) - - alpha_real = expected_alignment_formula( - p_choose, - self.model.decoder.layers[0].encoder_attn.mass_preservation, - sample["net_input"]["src_tokens"].eq(PAD_INDEX) - ) - - self.assertTrue( - torch.abs(alpha_system - alpha_real).le(5e-5).all(), - ) - - -class HardMonotonicAttentionTestCase( - unittest.TestCase, - MonotonicAttentionTestAbstractClass -): - def setUp(self): - self.model = build_transformer_monotonic_attention( - **generate_config({"simul_type": "hard_aligned"}) - ) - - -class InfiniteLookbackTestCase( - unittest.TestCase, - MonotonicAttentionTestAbstractClass -): - def setUp(self): - self.model = build_transformer_monotonic_attention( - **generate_config( - { - "simul_type": "infinite_lookback" - } - ) - ) - self.model.train() - - def test_fp16_for_long_input(self): - sample = { - "net_input": { - "src_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0), - "prev_output_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0), - "src_lengths": torch.LongTensor([1000]).cuda(), - }, - "target": torch.LongTensor([2] + [7] * 1000).unsqueeze(0).cuda() - } - self.model.cuda().half() - _, extra_out = self.model.forward(**sample["net_input"]) - for item in extra_out.attn_list: - for key in ["p_choose", "alpha", "beta", "soft_energy"]: - self.assertFalse(torch.isnan(item[key]).any()) - - def 
test_expected_attention(self): - for longer_src in [True, False]: - sample = make_sample_with_padding(longer_src) - _, extra_out = self.model.forward(**sample["net_input"]) - for item in extra_out.attn_list: - p_choose = item["p_choose"] - alpha_system = item["alpha"] - beta_system = item["beta"] - soft_energy_system = item["soft_energy"] - self.assertTrue(beta_system.size() == alpha_system.size()) - self.assertTrue(p_choose.size() == alpha_system.size()) - - bsz, num_head, tgt_len, src_len = alpha_system.size() - - alpha_system = alpha_system.view(-1, tgt_len, src_len) - beta_system = beta_system.view(-1, tgt_len, src_len) - p_choose = p_choose.view(-1, tgt_len, src_len) - soft_energy_system = soft_energy_system.view(-1, tgt_len, src_len) - - alpha_real = expected_alignment_formula( - p_choose, - self.model.decoder.layers[0].encoder_attn.mass_preservation, - sample["net_input"]["src_tokens"].eq(PAD_INDEX) - ) - - beta_real = expected_soft_attention_formula( - alpha_real, - soft_energy_system, - sample["net_input"]["src_tokens"].eq(PAD_INDEX), - chunksize=getattr( - self.model.decoder.layers[0].encoder_attn, - "chunk_size", - int(1e10) - ) - ) - - self.assertTrue( - torch.abs(beta_system - beta_real).le(1e-5).all(), - ) - - -class ChunkwiswTestCase( - InfiniteLookbackTestCase -): - def setUp(self): - self.model = build_transformer_monotonic_attention( - **generate_config( - { - "simul_type": "chunkwise", - "mocha_chunk_size": 3 - } - ) - ) - - -class WaitkTestCase(InfiniteLookbackTestCase): - def setUp(self): - self.model = build_transformer_monotonic_attention( - **generate_config( - { - "simul_type": "waitk", - "waitk_lagging": 3, - } - ) - ) - - def check_waitk(self, p_choose, lagging, padding_mask): - bsz, tgt_len, src_len = p_choose.size() - for bsz_i in range(bsz): - for i in range(tgt_len): - for j in range(src_len): - if not padding_mask[bsz_i, j]: - if j - i == lagging - 1: - self.assertTrue(p_choose[bsz_i, i, j] == 1) - else: - self.assertTrue(p_choose[bsz_i, i, j] == 0) - - def test_waitk_p_choose(self): - for longer_src in [True, False]: - for k in [1, 3, 10, 20, 100]: - sample = make_sample_with_padding(longer_src) - model = build_transformer_monotonic_attention( - **generate_config( - { - "simul_type": "waitk", - "waitk_lagging": k, - } - ) - ) - model.train() - _, extra_out = model.forward(**sample["net_input"]) - for item in extra_out.attn_list: - p_choose = item["p_choose"] - bsz, num_heads, tgt_len, src_len = p_choose.size() - padding_mask = sample["net_input"]["src_tokens"].eq(PAD_INDEX) - padding_mask = ( - padding_mask - .unsqueeze(1) - .expand([bsz, num_heads, src_len]) - .contiguous() - .view(-1, src_len) - ) - p_choose = p_choose.view(bsz * num_heads, tgt_len, src_len) - self.check_waitk(p_choose, k, padding_mask) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py deleted file mode 100644 index 5c7b67f8b1967ca515c5f7606253b46f903ea37e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
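# Illustrative sketch (not the original feature_utils.get_path_iterator): the nshard/rank
# arguments split the manifest so each worker dumps a disjoint slice of the dataset.
# A hypothetical equivalent of that sharding logic, assuming the usual fairseq manifest
# layout (first line = audio root, remaining lines = "<relative_path>\t<num_samples>"):
def shard_manifest(tsv_path: str, nshard: int, rank: int):
    with open(tsv_path) as f:
        root = f.readline().rstrip()
        lines = [line.rstrip() for line in f if line.strip()]
    # contiguous split: worker `rank` processes lines[start:end]
    per_shard = (len(lines) + nshard - 1) // nshard
    start, end = rank * per_shard, min((rank + 1) * per_shard, len(lines))
    for line in lines[start:end]:
        rel_path, nsample = line.split("\t")
        yield f"{root}/{rel_path}", int(nsample)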
- -import logging -import os -import sys - -import fairseq -import soundfile as sf -import torch -import torch.nn.functional as F - -from feature_utils import get_path_iterator, dump_feature - - -logging.basicConfig( - format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=os.environ.get("LOGLEVEL", "INFO").upper(), - stream=sys.stdout, -) -logger = logging.getLogger("dump_hubert_feature") - - -class HubertFeatureReader(object): - def __init__(self, ckpt_path, layer, max_chunk=1600000): - ( - model, - cfg, - task, - ) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path]) - self.model = model[0].eval().cuda() - self.task = task - self.layer = layer - self.max_chunk = max_chunk - logger.info(f"TASK CONFIG:\n{self.task.cfg}") - logger.info(f" max_chunk = {self.max_chunk}") - - def read_audio(self, path, ref_len=None): - wav, sr = sf.read(path) - assert sr == self.task.cfg.sample_rate, sr - if wav.ndim == 2: - wav = wav.mean(-1) - assert wav.ndim == 1, wav.ndim - if ref_len is not None and abs(ref_len - len(wav)) > 160: - logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") - return wav - - def get_feats(self, path, ref_len=None): - x = self.read_audio(path, ref_len) - with torch.no_grad(): - x = torch.from_numpy(x).float().cuda() - if self.task.cfg.normalize: - x = F.layer_norm(x, x.shape) - x = x.view(1, -1) - - feat = [] - for start in range(0, x.size(1), self.max_chunk): - x_chunk = x[:, start: start + self.max_chunk] - feat_chunk, _ = self.model.extract_features( - source=x_chunk, - padding_mask=None, - mask=False, - output_layer=self.layer, - ) - feat.append(feat_chunk) - return torch.cat(feat, 1).squeeze(0) - - -def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk): - reader = HubertFeatureReader(ckpt_path, layer, max_chunk) - generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) - dump_feature(reader, generator, num, split, nshard, rank, feat_dir) - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("tsv_dir") - parser.add_argument("split") - parser.add_argument("ckpt_path") - parser.add_argument("layer", type=int) - parser.add_argument("nshard", type=int) - parser.add_argument("rank", type=int) - parser.add_argument("feat_dir") - parser.add_argument("--max_chunk", type=int, default=1600000) - args = parser.parse_args() - logger.info(args) - - main(**vars(args)) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/cluster_kmeans.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/cluster_kmeans.py deleted file mode 100644 index 7cf844a95a075ee9ad318dc11dd71537d1ef6a5b..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/cluster_kmeans.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
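# Illustrative sketch (not part of the original script): cluster_kmeans.py fits a
# scikit-learn MiniBatchKMeans model on pooled acoustic features and saves it with
# joblib; at inference time the saved model's predict() maps each frame's feature
# vector to a discrete unit id. Minimal end-to-end example on random features
# (the 768-dim feature size and file name are placeholders):
import joblib
import numpy as np
from sklearn.cluster import MiniBatchKMeans

features = np.random.rand(10000, 768).astype(np.float32)   # (num_frames, feat_dim)
km = MiniBatchKMeans(n_clusters=50, batch_size=10000, n_init=20, max_iter=150)
km.fit(features)
joblib.dump(km, "km_50.bin")                                # mirrors the joblib.dump used later in the script

units = joblib.load("km_50.bin").predict(features[:100])    # discrete unit ids, shape (100,)
print(units[:10])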
- -import argparse -import logging -import os -import time - -import numpy as np -from sklearn.cluster import MiniBatchKMeans - -import joblib -from examples.textless_nlp.gslm.speech2unit.pretrained.utils import ( - get_and_dump_features, - get_features, -) - - -def get_logger(): - log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" - logging.basicConfig(format=log_format, level=logging.INFO) - logger = logging.getLogger(__name__) - return logger - - -def get_parser(): - parser = argparse.ArgumentParser( - description="Learn K-means clustering over acoustic features." - ) - - # Features arguments - parser.add_argument( - "--in_features_path", type=str, default=None, help="Features file path" - ) - parser.add_argument( - "--feature_type", - type=str, - choices=["logmel", "hubert", "w2v2", "cpc"], - default=None, - help="Acoustic feature type", - ) - parser.add_argument( - "--manifest_path", - type=str, - default=None, - help="Manifest file containing the root dir and file names", - ) - parser.add_argument( - "--out_features_path", - type=str, - default=None, - help="Features file path to write to", - ) - parser.add_argument( - "--checkpoint_path", - type=str, - help="Pretrained acoustic model checkpoint", - ) - parser.add_argument( - "--layer", - type=int, - help="The layer of the pretrained model to extract features from", - default=-1, - ) - parser.add_argument( - "--sample_pct", - type=float, - help="Percent data to use for K-means training", - default=0.1, - ) - - # K-means arguments - parser.add_argument( - "--num_clusters", type=int, help="Nubmer of clusters", default=50 - ) - parser.add_argument("--init", default="k-means++") - parser.add_argument( - "--max_iter", - type=int, - help="Maximum number of iterations for K-means training", - default=150, - ) - parser.add_argument( - "--batch_size", - type=int, - help="Batch size for K-means training", - default=10000, - ) - parser.add_argument("--tol", default=0.0, type=float) - parser.add_argument("--max_no_improvement", default=100, type=int) - parser.add_argument("--n_init", default=20, type=int) - parser.add_argument("--reassignment_ratio", default=0.5, type=float) - parser.add_argument( - "--out_kmeans_model_path", - type=str, - required=True, - help="Path to save K-means model", - ) - - # Leftovers - parser.add_argument( - "--seed", - type=int, - help="Random seed to use for K-means training", - default=1369, - ) - - return parser - - -def get_kmeans_model( - n_clusters, - init, - max_iter, - batch_size, - tol, - max_no_improvement, - n_init, - reassignment_ratio, - random_state, -): - return MiniBatchKMeans( - n_clusters=n_clusters, - init=init, - max_iter=max_iter, - batch_size=batch_size, - tol=tol, - max_no_improvement=max_no_improvement, - n_init=n_init, - reassignment_ratio=reassignment_ratio, - random_state=random_state, - verbose=1, - compute_labels=True, - init_size=None, - ) - - -def train_kmeans(kmeans_model, features_batch): - start_time = time.time() - kmeans_model.fit(features_batch) - time_taken = round((time.time() - start_time) // 60, 2) - return kmeans_model, time_taken - - -def main(args, logger): - # Features loading/extraction for K-means - if args.in_features_path: - # Feature loading - logger.info(f"Loading features from {args.in_features_path}...") - features_batch = np.load(args.in_features_path, allow_pickle=True) - else: - # Feature extraction - logger.info(f"Extracting {args.feature_type} acoustic features...") - features_batch = ( - get_features( - feature_type=args.feature_type, - 
checkpoint_path=args.checkpoint_path, - layer=args.layer, - manifest_path=args.manifest_path, - sample_pct=args.sample_pct, - flatten=True, - ) - if not args.out_features_path - else get_and_dump_features( - feature_type=args.feature_type, - checkpoint_path=args.checkpoint_path, - layer=args.layer, - manifest_path=args.manifest_path, - sample_pct=args.sample_pct, - flatten=True, - out_features_path=args.out_features_path, - ) - ) - if args.out_features_path: - logger.info( - f"Saved extracted features at {args.out_features_path}" - ) - logger.info(f"Features shape = {features_batch.shape}\n") - - # Learn and save K-means model - kmeans_model = get_kmeans_model( - n_clusters=args.num_clusters, - init=args.init, - max_iter=args.max_iter, - batch_size=args.batch_size, - tol=args.tol, - max_no_improvement=args.max_no_improvement, - n_init=args.n_init, - reassignment_ratio=args.reassignment_ratio, - random_state=args.seed, - ) - logger.info("Starting k-means training...") - kmeans_model, time_taken = train_kmeans( - kmeans_model=kmeans_model, features_batch=features_batch - ) - logger.info(f"...done k-means training in {time_taken} minutes") - inertia = -kmeans_model.score(features_batch) / len(features_batch) - logger.info(f"Total intertia: {round(inertia, 2)}\n") - - logger.info(f"Saving k-means model to {args.out_kmeans_model_path}") - os.makedirs(os.path.dirname(args.out_kmeans_model_path), exist_ok=True) - joblib.dump(kmeans_model, open(args.out_kmeans_model_path, "wb")) - - -if __name__ == "__main__": - parser = get_parser() - args = parser.parse_args() - logger = get_logger() - logger.info(args) - main(args, logger) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/build.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/build.py deleted file mode 100644 index 3427215746c9a146bd902f22ea9b26d121c36b27..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/build.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import torch - -from detectron2.utils.logger import _log_api_usage -from detectron2.utils.registry import Registry - -META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip -META_ARCH_REGISTRY.__doc__ = """ -Registry for meta-architectures, i.e. the whole model. - -The registered object will be called with `obj(cfg)` -and expected to return a `nn.Module` object. -""" - - -def build_model(cfg): - """ - Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. - Note that it does not load any weights from ``cfg``. - """ - meta_arch = cfg.MODEL.META_ARCHITECTURE - model = META_ARCH_REGISTRY.get(meta_arch)(cfg) - model.to(torch.device(cfg.MODEL.DEVICE)) - _log_api_usage("modeling.meta_arch." 
+ meta_arch) - return model diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/__init__.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OptimalScale/Robin-7b/lmflow/models/decoder_model.py b/spaces/OptimalScale/Robin-7b/lmflow/models/decoder_model.py deleted file mode 100644 index 19f3c41eb810a890ab37662e8ca0622f3a93b79c..0000000000000000000000000000000000000000 --- a/spaces/OptimalScale/Robin-7b/lmflow/models/decoder_model.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -"""A one-line summary of the module or program, terminated by a period. - -Leave one blank line. The rest of this docstring should contain an -overall description of the module or program. Optionally, it may also -contain a brief description of exported classes and functions and/or usage -examples. - -Typical usage example: - - foo = ClassFoo() - bar = foo.FunctionBar() -""" - -from lmflow.models.base_model import BaseModel - - -class DecoderModel(BaseModel): - - def __init__(self, *args, **kwargs): - pass diff --git a/spaces/Owechada/roopfaceswapr/roop/processors/frame/face_enhancer.py b/spaces/Owechada/roopfaceswapr/roop/processors/frame/face_enhancer.py deleted file mode 100644 index b1501d574fccb5bc80f12b7783f9505cacc48e06..0000000000000000000000000000000000000000 --- a/spaces/Owechada/roopfaceswapr/roop/processors/frame/face_enhancer.py +++ /dev/null @@ -1,89 +0,0 @@ -from typing import Any, List, Callable -import cv2 -import threading -import gfpgan - -import roop.globals -import roop.processors.frame.core -from roop.core import update_status -from roop.face_analyser import get_one_face -from roop.typing import Frame, Face -from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video -import torch - -FACE_ENHANCER = None -THREAD_SEMAPHORE = threading.Semaphore() -THREAD_LOCK = threading.Lock() -NAME = 'ROOP.FACE-ENHANCER' -frame_name = 'face_enhancer' - -if torch.cuda.is_available(): - device='cuda' -else: - device='cpu' - - -def get_face_enhancer() -> Any: - global FACE_ENHANCER - - with THREAD_LOCK: - if FACE_ENHANCER is None: - model_path = resolve_relative_path('../models/GFPGANv1.4.pth') - # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399 - FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1,device=device) # type: ignore[attr-defined] - return FACE_ENHANCER - - -def pre_check() -> bool: - download_directory_path = resolve_relative_path('../models') - # conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/GFPGANv1.4.pth']) - conditional_download(download_directory_path, ['https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth']) - return True - - -def pre_start() -> bool: - if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path): - update_status('Select an image or video for target path.', NAME) - return False - return True - - -def post_process() -> None: - global FACE_ENHANCER - - FACE_ENHANCER = None - - -def enhance_face(temp_frame: Frame) -> Frame: - with THREAD_SEMAPHORE: - _, _, temp_frame = get_face_enhancer().enhance( - temp_frame, - paste_back=True - ) - return temp_frame - - -def process_frame(source_face: Face, temp_frame: Frame) -> Frame: - target_face = get_one_face(temp_frame) - if target_face: - temp_frame = 
enhance_face(temp_frame) - return temp_frame - - -def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: - for temp_frame_path in temp_frame_paths: - temp_frame = cv2.imread(temp_frame_path) - result = process_frame(None, temp_frame) - cv2.imwrite(temp_frame_path, result) - if update: - update() - - -def process_image(source_path: str, target_path: str, output_path: str) -> None: - target_frame = cv2.imread(target_path) - result = process_frame(None, target_frame) - cv2.imwrite(output_path, result) - - -def process_video(source_path: str, temp_frame_paths: List[str]) -> None: - roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames) diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/readme.md b/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/readme.md deleted file mode 100644 index 5421bfe3e67b7b6cbd7baf96b741b539d65bb0fd..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/readme.md +++ /dev/null @@ -1,9 +0,0 @@ -# Encoding in Style: a StyleGAN Encoder for Image-to-Image Translation - -## Description -Official Implementation of pSp paper for both training and evaluation. The pSp method extends the StyleGAN model to -allow solving different image-to-image translation problems using its encoder. - -Fork from [https://github.com/eladrich/pixel2style2pixel](https://github.com/eladrich/pixel2style2pixel). - -In VToonify, we modify pSp to accept z+ latent code. diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/sxml/simple.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/sxml/simple.go deleted file mode 100644 index f33e17f8a5a341ef4e3a22a9ae8732068ce679c7..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/sxml/simple.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/De-limiter/eval_delimit/score_diff_dyn_complexity.py b/spaces/PeepDaSlan9/De-limiter/eval_delimit/score_diff_dyn_complexity.py deleted file mode 100644 index 0f0e07917828cf080f3b08fb2e769b1776e8d61c..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/De-limiter/eval_delimit/score_diff_dyn_complexity.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import argparse -import csv -import json -import glob - -import tqdm -import numpy as np -import librosa -import musdb -import pyloudnorm as pyln - -from utils import str2bool, db2linear - -parser = argparse.ArgumentParser(description="model test.py") - -parser.add_argument( - "--target", - type=str, - default="all", - help="target source. all, vocals, bass, drums, other.", -) -parser.add_argument( - "--root", - type=str, - default="/path/to/musdb18hq_loudnorm", -) -parser.add_argument( - "--output_directory", - type=str, - default="/path/to/results", -) -parser.add_argument("--exp_name", type=str, default="convtasnet_6_s") -parser.add_argument( - "--calc_results", - type=str2bool, - default=True, - help="Set this True when you want to calculate the results of the test set. Set this False when calculating musdb-hq vs musdb-XL. 
(top row in Table 1.)", -) - -args, _ = parser.parse_known_args() - -args.sample_rate = 44100 -meter = pyln.Meter(args.sample_rate) - -if args.calc_results: - args.test_output_dir = f"{args.output_directory}/test/{args.exp_name}" -else: - args.test_output_dir = f"{args.output_directory}/{args.exp_name}" - - -est_track_list = glob.glob(f"{args.test_output_dir}/*/{args.target}.wav") -f = open( - f"{args.test_output_dir}/score_feature_{args.target}.json", - encoding="UTF-8", -) -dict_song_score_est = json.loads(f.read()) - -if args.target == "all": - ref_track_list = glob.glob(f"{args.root}/*/mixture.wav") - f = open(f"{args.root}/score_feature.json", encoding="UTF-8") - dict_song_score_ref = json.loads(f.read()) -else: - ref_track_list = glob.glob(f"{args.root}/*/{args.target}.wav") - f = open(f"{args.root}/score_feature_{args.target}.json", encoding="UTF-8") - dict_song_score_ref = json.loads(f.read()) - -i = 0 - -dict_song_score = {} -list_diff_dynamic_complexity = [] - -for track in tqdm.tqdm(ref_track_list): - audio_name = os.path.basename(os.path.dirname(track)) - ref_dyn_complexity = dict_song_score_ref[audio_name]["dynamic_complexity_score"] - est_dyn_complexity = dict_song_score_est[audio_name]["dynamic_complexity_score"] - - list_diff_dynamic_complexity.append(est_dyn_complexity - ref_dyn_complexity) - - i += 1 - -print( - f"Dynamic complexity difference {args.exp_name} vs {os.path.basename(args.root)} on {args.target}" -) -print("mean: ", np.mean(list_diff_dynamic_complexity)) -print("median: ", np.median(list_diff_dynamic_complexity)) -print("std: ", np.std(list_diff_dynamic_complexity)) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/builder.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/builder.py deleted file mode 100644 index 7567316c566bd3aca6d8f65a84b00e9e890948a7..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/builder.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..runner import Sequential -from ..utils import Registry, build_from_cfg - - -def build_model_from_cfg(cfg, registry, default_args=None): - """Build a PyTorch model from config dict(s). Different from - ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built. - - Args: - cfg (dict, list[dict]): The config of modules, is is either a config - dict or a list of config dicts. If cfg is a list, a - the built modules will be wrapped with ``nn.Sequential``. - registry (:obj:`Registry`): A registry the module belongs to. - default_args (dict, optional): Default arguments to build the module. - Defaults to None. - - Returns: - nn.Module: A built nn module. 
- """ - if isinstance(cfg, list): - modules = [ - build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg - ] - return Sequential(*modules) - else: - return build_from_cfg(cfg, registry, default_args) - - -MODELS = Registry('model', build_func=build_model_from_cfg) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/uniformer.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/uniformer.py deleted file mode 100644 index 0c4bb88e4c928540cca9ab609988b916520f5b7a..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/uniformer.py +++ /dev/null @@ -1,422 +0,0 @@ -# -------------------------------------------------------- -# UniFormer -# Copyright (c) 2022 SenseTime X-Lab -# Licensed under The MIT License [see LICENSE for details] -# Written by Kunchang Li -# -------------------------------------------------------- - -from collections import OrderedDict -import math - -from functools import partial -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -import numpy as np -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from annotator.uniformer.mmcv_custom import load_checkpoint -from annotator.uniformer.mmseg.utils import get_root_logger -from ..builder import BACKBONES - - -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class CMlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Conv2d(in_features, hidden_features, 1) - self.act = act_layer() - self.fc2 = nn.Conv2d(hidden_features, out_features, 1) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class CBlock(nn.Module): - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - self.norm1 = nn.BatchNorm2d(dim) - self.conv1 = nn.Conv2d(dim, dim, 1) - self.conv2 = nn.Conv2d(dim, dim, 1) - self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = nn.BatchNorm2d(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x))))) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class Attention(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim ** -0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x): - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SABlock(nn.Module): - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, - num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, - attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - B, N, H, W = x.shape - x = x.flatten(2).transpose(1, 2) - x = x + self.drop_path(self.attn(self.norm1(x))) - x = x + self.drop_path(self.mlp(self.norm2(x))) - x = x.transpose(1, 2).reshape(B, N, H, W) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class SABlock_Windows(nn.Module): - def __init__(self, dim, num_heads, window_size=14, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.window_size=window_size - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, - num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, - attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - x = x.permute(0, 2, 3, 1) - B, H, W, C = x.shape - shortcut = x - x = self.norm1(x) - - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - x_windows = window_partition(x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - x = x.permute(0, 3, 1, 2).reshape(B, C, H, W) - return x - - -class PatchEmbed(nn.Module): - """ Image to Patch Embedding - """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) - self.img_size = img_size - self.patch_size = patch_size - self.num_patches = num_patches - self.norm = nn.LayerNorm(embed_dim) - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - - def forward(self, x): - B, _, H, W = x.shape - x = self.proj(x) - B, _, H, W = x.shape - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() - return x - - -@BACKBONES.register_module() -class UniFormer(nn.Module): - """ Vision Transformer - A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - - https://arxiv.org/abs/2010.11929 - """ - def __init__(self, layers=[3, 4, 8, 3], img_size=224, in_chans=3, num_classes=80, embed_dim=[64, 128, 320, 512], - head_dim=64, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), - pretrained_path=None, use_checkpoint=False, checkpoint_num=[0, 0, 0, 0], - windows=False, hybrid=False, window_size=14): - """ - Args: - layer (list): number of block in each layer - img_size (int, tuple): input image size - in_chans (int): number of input channels - num_classes (int): number of classes for classification head - embed_dim (int): embedding dimension - head_dim (int): dimension of attention heads - mlp_ratio (int): ratio of mlp hidden dim to embedding dim - qkv_bias (bool): enable bias for qkv if True - qk_scale (float): override default qk scale of head_dim ** -0.5 if set - representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set - drop_rate (float): dropout rate - attn_drop_rate (float): attention dropout rate - drop_path_rate (float): stochastic depth rate - norm_layer (nn.Module): normalization layer - pretrained_path (str): path of pretrained model - use_checkpoint (bool): whether use checkpoint - checkpoint_num (list): 
index for using checkpoint in every stage - windows (bool): whether use window MHRA - hybrid (bool): whether use hybrid MHRA - window_size (int): size of window (>14) - """ - super().__init__() - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.checkpoint_num = checkpoint_num - self.windows = windows - print(f'Use Checkpoint: {self.use_checkpoint}') - print(f'Checkpoint Number: {self.checkpoint_num}') - self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.patch_embed1 = PatchEmbed( - img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0]) - self.patch_embed2 = PatchEmbed( - img_size=img_size // 4, patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1]) - self.patch_embed3 = PatchEmbed( - img_size=img_size // 8, patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2]) - self.patch_embed4 = PatchEmbed( - img_size=img_size // 16, patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3]) - - self.pos_drop = nn.Dropout(p=drop_rate) - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(layers))] # stochastic depth decay rule - num_heads = [dim // head_dim for dim in embed_dim] - self.blocks1 = nn.ModuleList([ - CBlock( - dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) - for i in range(layers[0])]) - self.norm1=norm_layer(embed_dim[0]) - self.blocks2 = nn.ModuleList([ - CBlock( - dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]], norm_layer=norm_layer) - for i in range(layers[1])]) - self.norm2 = norm_layer(embed_dim[1]) - if self.windows: - print('Use local window for all blocks in stage3') - self.blocks3 = nn.ModuleList([ - SABlock_Windows( - dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer) - for i in range(layers[2])]) - elif hybrid: - print('Use hybrid window for blocks in stage3') - block3 = [] - for i in range(layers[2]): - if (i + 1) % 4 == 0: - block3.append(SABlock( - dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)) - else: - block3.append(SABlock_Windows( - dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)) - self.blocks3 = nn.ModuleList(block3) - else: - print('Use global window for all blocks in stage3') - self.blocks3 = nn.ModuleList([ - SABlock( - dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer) - for i in range(layers[2])]) - self.norm3 = norm_layer(embed_dim[2]) - self.blocks4 = nn.ModuleList([ - SABlock( - dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, 
drop_path=dpr[i+layers[0]+layers[1]+layers[2]], norm_layer=norm_layer) - for i in range(layers[3])]) - self.norm4 = norm_layer(embed_dim[3]) - - # Representation layer - if representation_size: - self.num_features = representation_size - self.pre_logits = nn.Sequential(OrderedDict([ - ('fc', nn.Linear(embed_dim, representation_size)), - ('act', nn.Tanh()) - ])) - else: - self.pre_logits = nn.Identity() - - self.apply(self._init_weights) - self.init_weights(pretrained=pretrained_path) - - def init_weights(self, pretrained): - if isinstance(pretrained, str): - logger = get_root_logger() - load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) - print(f'Load pretrained model from {pretrained}') - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_embed', 'cls_token'} - - def get_classifier(self): - return self.head - - def reset_classifier(self, num_classes, global_pool=''): - self.num_classes = num_classes - self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() - - def forward_features(self, x): - out = [] - x = self.patch_embed1(x) - x = self.pos_drop(x) - for i, blk in enumerate(self.blocks1): - if self.use_checkpoint and i < self.checkpoint_num[0]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm1(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - x = self.patch_embed2(x) - for i, blk in enumerate(self.blocks2): - if self.use_checkpoint and i < self.checkpoint_num[1]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm2(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - x = self.patch_embed3(x) - for i, blk in enumerate(self.blocks3): - if self.use_checkpoint and i < self.checkpoint_num[2]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm3(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - x = self.patch_embed4(x) - for i, blk in enumerate(self.blocks4): - if self.use_checkpoint and i < self.checkpoint_num[3]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm4(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - return tuple(out) - - def forward(self, x): - x = self.forward_features(x) - return x diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/concat_dataset.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/concat_dataset.py deleted file mode 100644 index e5e087c42036f27132ca2c6e1d5252af5fee4a97..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/concat_dataset.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
-import bisect - -from torch.utils.data.dataset import ConcatDataset as _ConcatDataset - - -class ConcatDataset(_ConcatDataset): - """ - Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra - method for querying the sizes of the image - """ - - def get_idxs(self, idx): - dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) - if dataset_idx == 0: - sample_idx = idx - else: - sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] - return dataset_idx, sample_idx - - def get_img_info(self, idx): - dataset_idx, sample_idx = self.get_idxs(idx) - return self.datasets[dataset_idx].get_img_info(sample_idx) diff --git a/spaces/PranomVignesh/Detecting-unauthorized-person-with-firearms/app.py b/spaces/PranomVignesh/Detecting-unauthorized-person-with-firearms/app.py deleted file mode 100644 index edac7a569e91aea34cc71953fb4b515e3f251249..0000000000000000000000000000000000000000 --- a/spaces/PranomVignesh/Detecting-unauthorized-person-with-firearms/app.py +++ /dev/null @@ -1,68 +0,0 @@ -import gradio as gr -import os -import torch -from transformers import pipeline - -imageClassifier = pipeline(task="image-classification", - model="PranomVignesh/Police-vs-Public") - - -model = torch.hub.load( - 'ultralytics/yolov5', - 'custom', - path='./best.pt', - device="cpu", - force_reload=True -) -model.eval() - - -def predict(image): - results = model([image], size=224) - print(results) - predictions = imageClassifier(image) - classMappings = { - 'police': "Police / Authorized Personnel", - 'public': 'Unauthorized Person' - } - output = {} - for item in predictions: - output[classMappings[item['label']]] = item['score'] - - return results.render()[0], output - - -title = "Detecting Unauthorized Individuals with Firearms" -description = """ - Try the examples at bottom to get started. 
-""" -examples = [ - [os.path.join(os.path.abspath(''), './examples/sample_1.png')], - [os.path.join(os.path.abspath(''), './examples/sample_2.png')], - [os.path.join(os.path.abspath(''), './examples/sample_3.jpg')], - [os.path.join(os.path.abspath(''), './examples/sample_4.jpg')], - [os.path.join(os.path.abspath(''), './examples/sample_5.jpg')], - [os.path.join(os.path.abspath(''), './examples/sample_6.jpg')], - [os.path.join(os.path.abspath(''), './examples/sample_7.jpg')], - [os.path.join(os.path.abspath(''), './examples/sample_8.jpg')], -] - -inputs = gr.Image(type="pil", shape=(224, 224), - label="Upload your image for detection") - -outputs = [ - gr.Image(type="pil", label="Gun Detections"), - gr.Label(label="Class Prediction") -] - -interface = gr.Interface( - fn=predict, - inputs=inputs, - outputs=outputs, - title=title, - examples=examples, - description=description, - cache_examples=True, - theme='huggingface' -) -interface.launch(debug=True, enable_queue=True) diff --git a/spaces/Prasanthi123/myaiavatarammu/README.md b/spaces/Prasanthi123/myaiavatarammu/README.md deleted file mode 100644 index c914a9cbf613aec80a724661b0be48d118bf7b78..0000000000000000000000000000000000000000 --- a/spaces/Prasanthi123/myaiavatarammu/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Myaiavatarammu -emoji: 🐠 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Quickturtle005/profitability_tool/README.md b/spaces/Quickturtle005/profitability_tool/README.md deleted file mode 100644 index 7773bd3bd36d5aabb297d48b8d30bbfeb3ed5877..0000000000000000000000000000000000000000 --- a/spaces/Quickturtle005/profitability_tool/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Profitability Tool -emoji: 📈 -colorFrom: purple -colorTo: pink -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ReFenter/DeepDanbooru_string/app.py b/spaces/ReFenter/DeepDanbooru_string/app.py deleted file mode 100644 index 49019837c9207cc68cb37be0342f3bc44fd0decb..0000000000000000000000000000000000000000 --- a/spaces/ReFenter/DeepDanbooru_string/app.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import argparse -import functools -import os -import html -import pathlib -import tarfile - -import deepdanbooru as dd -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image -import tensorflow as tf -import piexif -import piexif.helper - -TITLE = 'DeepDanbooru String' - -TOKEN = os.environ['TOKEN'] -MODEL_REPO = 'CikeyQI/DeepDanbooru_string' -MODEL_FILENAME = 'model-resnet_custom_v3.h5' -LABEL_FILENAME = 'tags.txt' - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--score-slider-step', type=float, default=0.05) - parser.add_argument('--score-threshold', type=float, default=0.5) - parser.add_argument('--theme', type=str, default='dark-grass') - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - return parser.parse_args() - - -def 
load_sample_image_paths() -> list[pathlib.Path]: - image_dir = pathlib.Path('images') - if not image_dir.exists(): - dataset_repo = 'hysts/sample-images-TADNE' - path = huggingface_hub.hf_hub_download(dataset_repo, - 'images.tar.gz', - repo_type='dataset', - use_auth_token=TOKEN) - with tarfile.open(path) as f: - f.extractall() - return sorted(image_dir.glob('*')) - - -def load_model() -> tf.keras.Model: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - MODEL_FILENAME, - use_auth_token=TOKEN) - model = tf.keras.models.load_model(path) - return model - - -def load_labels() -> list[str]: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - LABEL_FILENAME, - use_auth_token=TOKEN) - with open(path) as f: - labels = [line.strip() for line in f.readlines()] - return labels - -def plaintext_to_html(text): - text = "
<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>
      " - return text - -def predict(image: PIL.Image.Image, score_threshold: float, - model: tf.keras.Model, labels: list[str]) -> dict[str, float]: - rawimage = image - _, height, width, _ = model.input_shape - image = np.asarray(image) - image = tf.image.resize(image, - size=(height, width), - method=tf.image.ResizeMethod.AREA, - preserve_aspect_ratio=True) - image = image.numpy() - image = dd.image.transform_and_pad_image(image, width, height) - image = image / 255. - probs = model.predict(image[None, ...])[0] - probs = probs.astype(float) - res = dict() - for prob, label in zip(probs.tolist(), labels): - if prob < score_threshold: - continue - res[label] = prob - b = dict(sorted(res.items(),key=lambda item:item[1], reverse=True)) - a = ', '.join(list(b.keys())).replace('_',' ').replace('(','\(').replace(')','\)') - c = ', '.join(list(b.keys())) - - items = rawimage.info - geninfo = '' - - if "exif" in rawimage.info: - exif = piexif.load(rawimage.info["exif"]) - exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'') - try: - exif_comment = piexif.helper.UserComment.load(exif_comment) - except ValueError: - exif_comment = exif_comment.decode('utf8', errors="ignore") - - items['exif comment'] = exif_comment - geninfo = exif_comment - - for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif', - 'loop', 'background', 'timestamp', 'duration']: - items.pop(field, None) - - geninfo = items.get('parameters', geninfo) - - info = f""" -
<p><h4>PNG Info</h4></p>
      -""" - for key, text in items.items(): - info += f""" -
<div>
-<p><b>{plaintext_to_html(str(key))}</b></p>
-<p>{plaintext_to_html(str(text))}</p>
-</div>
      -""".strip()+"\n" - - if len(info) == 0: - message = "Nothing found in the image." - info = f"
<div><p>{message}</p></div>
      " - - return (a,c,res,info) - - -def main(): - args = parse_args() - model = load_model() - labels = load_labels() - - func = functools.partial(predict, model=model, labels=labels) - func = functools.update_wrapper(func, predict) - - gr.Interface( - func, - [ - gr.inputs.Image(type='pil', label='Input'), - gr.inputs.Slider(0, - 1, - step=args.score_slider_step, - default=args.score_threshold, - label='Score Threshold'), - ], - [ - gr.outputs.Textbox(label='Output (string)'), - gr.outputs.Textbox(label='Output (raw string)'), - gr.outputs.Label(label='Output (label)'), - gr.outputs.HTML() - ], - examples=[ - ['miku.jpg',0.5], - ['miku2.jpg',0.5] - ], - title=TITLE, - description=''' -Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer. - -Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru) - -PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - ''', - theme=args.theme, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/__init__.py deleted file mode 100644 index f004dd95d97df16167f932587b3ce73b05b04a37..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -from .anchor_free_head import AnchorFreeHead -from .anchor_head import AnchorHead -from .atss_head import ATSSHead -from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead -from .centripetal_head import CentripetalHead -from .corner_head import CornerHead -from .embedding_rpn_head import EmbeddingRPNHead -from .fcos_head import FCOSHead -from .fovea_head import FoveaHead -from .free_anchor_retina_head import FreeAnchorRetinaHead -from .fsaf_head import FSAFHead -from .ga_retina_head import GARetinaHead -from .ga_rpn_head import GARPNHead -from .gfl_head import GFLHead -from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead -from .ld_head import LDHead -from .nasfcos_head import NASFCOSHead -from .paa_head import PAAHead -from .pisa_retinanet_head import PISARetinaHead -from .pisa_ssd_head import PISASSDHead -from .reppoints_head import RepPointsHead -from .retina_head import RetinaHead -from .retina_sepbn_head import RetinaSepBNHead -from .rpn_head import RPNHead -from .sabl_retina_head import SABLRetinaHead -from .ssd_head import SSDHead -from .transformer_head import TransformerHead -from .vfnet_head import VFNetHead -from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead -from .yolo_head import YOLOV3Head - -__all__ = [ - 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', - 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', - 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead', - 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead', - 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead', - 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', - 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'TransformerHead', - 'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead' -] diff --git 
a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/datasets/pipelines/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/datasets/pipelines/__init__.py deleted file mode 100644 index 8b9046b07bb4ddea7a707a392b42e72db7c9df67..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/datasets/pipelines/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -from .compose import Compose -from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor, - Transpose, to_tensor) -from .loading import LoadAnnotations, LoadImageFromFile -from .test_time_aug import MultiScaleFlipAug -from .transforms import (CLAHE, AdjustGamma, Normalize, Pad, - PhotoMetricDistortion, RandomCrop, RandomFlip, - RandomRotate, Rerange, Resize, RGB2Gray, SegRescale) - -__all__ = [ - 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', - 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', - 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', - 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate', - 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray' -] diff --git a/spaces/Rongjiehuang/GenerSpeech/modules/parallel_wavegan/models/__init__.py b/spaces/Rongjiehuang/GenerSpeech/modules/parallel_wavegan/models/__init__.py deleted file mode 100644 index 4803ba6b2a0afc8022e756ae5b3f4c7403c3c1bd..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/GenerSpeech/modules/parallel_wavegan/models/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .melgan import * # NOQA -from .parallel_wavegan import * # NOQA diff --git a/spaces/SIH/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/utils.py b/spaces/SIH/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/utils.py deleted file mode 100644 index eb94665cf94ff03a04cf47cd5e54535154963de1..0000000000000000000000000000000000000000 --- a/spaces/SIH/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/utils.py +++ /dev/null @@ -1,52 +0,0 @@ -import math -import string - - -def maybe_is_text(s, thresh=2.5): - if len(s) == 0: - return False - # Calculate the entropy of the string - entropy = 0 - for c in string.printable: - p = s.count(c) / len(s) - if p > 0: - entropy += -p * math.log2(p) - - # Check if the entropy is within a reasonable range for text - if entropy > thresh: - return True - return False - - -def maybe_is_code(s): - if len(s) == 0: - return False - # Check if the string contains a lot of non-ascii characters - if len([c for c in s if ord(c) > 128]) / len(s) > 0.1: - return True - return False - - -def strings_similarity(s1, s2): - if len(s1) == 0 or len(s2) == 0: - return 0 - # break the strings into words - s1 = set(s1.split()) - s2 = set(s2.split()) - # return the similarity ratio - return len(s1.intersection(s2)) / len(s1.union(s2)) - - -def maybe_is_truncated(s): - punct = [".", "!", "?", '"'] - if s[-1] in punct: - return False - return True - - -def maybe_is_html(s): - if len(s) == 0: - return False - # check for html tags - if "= -3.) & (x <= 3.)) / 6. 
- return grad_output * m - - -class HardSigmoidJitAutoFn(torch.autograd.Function): - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return hard_sigmoid_jit_fwd(x) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - return hard_sigmoid_jit_bwd(x, grad_output) - - -def hard_sigmoid_me(x, inplace: bool = False): - return HardSigmoidJitAutoFn.apply(x) - - -class HardSigmoidMe(nn.Module): - def __init__(self, inplace: bool = False): - super(HardSigmoidMe, self).__init__() - - def forward(self, x): - return HardSigmoidJitAutoFn.apply(x) - - -@torch.jit.script -def hard_swish_jit_fwd(x): - return x * (x + 3).clamp(min=0, max=6).div(6.) - - -@torch.jit.script -def hard_swish_jit_bwd(x, grad_output): - m = torch.ones_like(x) * (x >= 3.) - m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m) - return grad_output * m - - -class HardSwishJitAutoFn(torch.autograd.Function): - """A memory efficient, jit-scripted HardSwish activation""" - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return hard_swish_jit_fwd(x) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - return hard_swish_jit_bwd(x, grad_output) - - -def hard_swish_me(x, inplace=False): - return HardSwishJitAutoFn.apply(x) - - -class HardSwishMe(nn.Module): - def __init__(self, inplace: bool = False): - super(HardSwishMe, self).__init__() - - def forward(self, x): - return HardSwishJitAutoFn.apply(x) diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/structures/masks.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/structures/masks.py deleted file mode 100644 index 995fee72a6d6190c9596a4bf62dc335766b954ee..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/structures/masks.py +++ /dev/null @@ -1,534 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import itertools -import numpy as np -from typing import Any, Iterator, List, Union -import annotator.oneformer.pycocotools.mask as mask_util -import torch -from torch import device - -from annotator.oneformer.detectron2.layers.roi_align import ROIAlign -from annotator.oneformer.detectron2.utils.memory import retry_if_cuda_oom - -from .boxes import Boxes - - -def polygon_area(x, y): - # Using the shoelace formula - # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates - return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) - - -def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: - """ - Args: - polygons (list[ndarray]): each array has shape (Nx2,) - height, width (int) - - Returns: - ndarray: a bool mask of shape (height, width) - """ - if len(polygons) == 0: - # COCOAPI does not support empty polygons - return np.zeros((height, width)).astype(bool) - rles = mask_util.frPyObjects(polygons, height, width) - rle = mask_util.merge(rles) - return mask_util.decode(rle).astype(bool) - - -def rasterize_polygons_within_box( - polygons: List[np.ndarray], box: np.ndarray, mask_size: int -) -> torch.Tensor: - """ - Rasterize the polygons into a mask image and - crop the mask content in the given box. - The cropped mask is resized to (mask_size, mask_size). - - This function is used when generating training targets for mask head in Mask R-CNN. 
- Given original ground-truth masks for an image, new ground-truth mask - training targets in the size of `mask_size x mask_size` - must be provided for each predicted box. This function will be called to - produce such targets. - - Args: - polygons (list[ndarray[float]]): a list of polygons, which represents an instance. - box: 4-element numpy array - mask_size (int): - - Returns: - Tensor: BoolTensor of shape (mask_size, mask_size) - """ - # 1. Shift the polygons w.r.t the boxes - w, h = box[2] - box[0], box[3] - box[1] - - polygons = copy.deepcopy(polygons) - for p in polygons: - p[0::2] = p[0::2] - box[0] - p[1::2] = p[1::2] - box[1] - - # 2. Rescale the polygons to the new box size - # max() to avoid division by small number - ratio_h = mask_size / max(h, 0.1) - ratio_w = mask_size / max(w, 0.1) - - if ratio_h == ratio_w: - for p in polygons: - p *= ratio_h - else: - for p in polygons: - p[0::2] *= ratio_w - p[1::2] *= ratio_h - - # 3. Rasterize the polygons with coco api - mask = polygons_to_bitmask(polygons, mask_size, mask_size) - mask = torch.from_numpy(mask) - return mask - - -class BitMasks: - """ - This class stores the segmentation masks for all objects in one image, in - the form of bitmaps. - - Attributes: - tensor: bool Tensor of N,H,W, representing N instances in the image. - """ - - def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): - """ - Args: - tensor: bool Tensor of N,H,W, representing N instances in the image. - """ - if isinstance(tensor, torch.Tensor): - tensor = tensor.to(torch.bool) - else: - tensor = torch.as_tensor(tensor, dtype=torch.bool, device=torch.device("cpu")) - assert tensor.dim() == 3, tensor.size() - self.image_size = tensor.shape[1:] - self.tensor = tensor - - @torch.jit.unused - def to(self, *args: Any, **kwargs: Any) -> "BitMasks": - return BitMasks(self.tensor.to(*args, **kwargs)) - - @property - def device(self) -> torch.device: - return self.tensor.device - - @torch.jit.unused - def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": - """ - Returns: - BitMasks: Create a new :class:`BitMasks` by indexing. - - The following usage are allowed: - - 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. - 2. `new_masks = masks[2:10]`: return a slice of masks. - 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor - with `length = len(masks)`. Nonzero elements in the vector will be selected. - - Note that the returned object might share storage with this object, - subject to Pytorch's indexing semantics. - """ - if isinstance(item, int): - return BitMasks(self.tensor[item].unsqueeze(0)) - m = self.tensor[item] - assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( - item, m.shape - ) - return BitMasks(m) - - @torch.jit.unused - def __iter__(self) -> torch.Tensor: - yield from self.tensor - - @torch.jit.unused - def __repr__(self) -> str: - s = self.__class__.__name__ + "(" - s += "num_instances={})".format(len(self.tensor)) - return s - - def __len__(self) -> int: - return self.tensor.shape[0] - - def nonempty(self) -> torch.Tensor: - """ - Find masks that are non-empty. - - Returns: - Tensor: a BoolTensor which represents - whether each mask is empty (False) or non-empty (True). 
- """ - return self.tensor.flatten(1).any(dim=1) - - @staticmethod - def from_polygon_masks( - polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int - ) -> "BitMasks": - """ - Args: - polygon_masks (list[list[ndarray]] or PolygonMasks) - height, width (int) - """ - if isinstance(polygon_masks, PolygonMasks): - polygon_masks = polygon_masks.polygons - masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] - if len(masks): - return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) - else: - return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) - - @staticmethod - def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": - """ - Args: - roi_masks: - height, width (int): - """ - return roi_masks.to_bitmasks(height, width) - - def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: - """ - Crop each bitmask by the given box, and resize results to (mask_size, mask_size). - This can be used to prepare training targets for Mask R-CNN. - It has less reconstruction error compared to rasterization with polygons. - However we observe no difference in accuracy, - but BitMasks requires more memory to store all the masks. - - Args: - boxes (Tensor): Nx4 tensor storing the boxes for each mask - mask_size (int): the size of the rasterized mask. - - Returns: - Tensor: - A bool tensor of shape (N, mask_size, mask_size), where - N is the number of predicted boxes for this image. - """ - assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) - device = self.tensor.device - - batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] - rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 - - bit_masks = self.tensor.to(dtype=torch.float32) - rois = rois.to(device=device) - output = ( - ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) - .forward(bit_masks[:, None, :, :], rois) - .squeeze(1) - ) - output = output >= 0.5 - return output - - def get_bounding_boxes(self) -> Boxes: - """ - Returns: - Boxes: tight bounding boxes around bitmasks. - If a mask is empty, it's bounding box will be all zero. - """ - boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) - x_any = torch.any(self.tensor, dim=1) - y_any = torch.any(self.tensor, dim=2) - for idx in range(self.tensor.shape[0]): - x = torch.where(x_any[idx, :])[0] - y = torch.where(y_any[idx, :])[0] - if len(x) > 0 and len(y) > 0: - boxes[idx, :] = torch.as_tensor( - [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 - ) - return Boxes(boxes) - - @staticmethod - def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": - """ - Concatenates a list of BitMasks into a single BitMasks - - Arguments: - bitmasks_list (list[BitMasks]) - - Returns: - BitMasks: the concatenated BitMasks - """ - assert isinstance(bitmasks_list, (list, tuple)) - assert len(bitmasks_list) > 0 - assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) - - cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) - return cat_bitmasks - - -class PolygonMasks: - """ - This class stores the segmentation masks for all objects in one image, in the form of polygons. - - Attributes: - polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. 
- """ - - def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): - """ - Arguments: - polygons (list[list[np.ndarray]]): The first - level of the list correspond to individual instances, - the second level to all the polygons that compose the - instance, and the third level to the polygon coordinates. - The third level array should have the format of - [x0, y0, x1, y1, ..., xn, yn] (n >= 3). - """ - if not isinstance(polygons, list): - raise ValueError( - "Cannot create PolygonMasks: Expect a list of list of polygons per image. " - "Got '{}' instead.".format(type(polygons)) - ) - - def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: - # Use float64 for higher precision, because why not? - # Always put polygons on CPU (self.to is a no-op) since they - # are supposed to be small tensors. - # May need to change this assumption if GPU placement becomes useful - if isinstance(t, torch.Tensor): - t = t.cpu().numpy() - return np.asarray(t).astype("float64") - - def process_polygons( - polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] - ) -> List[np.ndarray]: - if not isinstance(polygons_per_instance, list): - raise ValueError( - "Cannot create polygons: Expect a list of polygons per instance. " - "Got '{}' instead.".format(type(polygons_per_instance)) - ) - # transform each polygon to a numpy array - polygons_per_instance = [_make_array(p) for p in polygons_per_instance] - for polygon in polygons_per_instance: - if len(polygon) % 2 != 0 or len(polygon) < 6: - raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") - return polygons_per_instance - - self.polygons: List[List[np.ndarray]] = [ - process_polygons(polygons_per_instance) for polygons_per_instance in polygons - ] - - def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": - return self - - @property - def device(self) -> torch.device: - return torch.device("cpu") - - def get_bounding_boxes(self) -> Boxes: - """ - Returns: - Boxes: tight bounding boxes around polygon masks. - """ - boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) - for idx, polygons_per_instance in enumerate(self.polygons): - minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) - maxxy = torch.zeros(2, dtype=torch.float32) - for polygon in polygons_per_instance: - coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) - minxy = torch.min(minxy, torch.min(coords, dim=0).values) - maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) - boxes[idx, :2] = minxy - boxes[idx, 2:] = maxxy - return Boxes(boxes) - - def nonempty(self) -> torch.Tensor: - """ - Find masks that are non-empty. - - Returns: - Tensor: - a BoolTensor which represents whether each mask is empty (False) or not (True). - """ - keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] - return torch.from_numpy(np.asarray(keep, dtype=bool)) - - def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": - """ - Support indexing over the instances and return a `PolygonMasks` object. - `item` can be: - - 1. An integer. It will return an object with only one instance. - 2. A slice. It will return an object with the selected instances. - 3. A list[int]. It will return an object with the selected instances, - correpsonding to the indices in the list. - 4. A vector mask of type BoolTensor, whose length is num_instances. - It will return an object with the instances whose mask is nonzero. 
- """ - if isinstance(item, int): - selected_polygons = [self.polygons[item]] - elif isinstance(item, slice): - selected_polygons = self.polygons[item] - elif isinstance(item, list): - selected_polygons = [self.polygons[i] for i in item] - elif isinstance(item, torch.Tensor): - # Polygons is a list, so we have to move the indices back to CPU. - if item.dtype == torch.bool: - assert item.dim() == 1, item.shape - item = item.nonzero().squeeze(1).cpu().numpy().tolist() - elif item.dtype in [torch.int32, torch.int64]: - item = item.cpu().numpy().tolist() - else: - raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) - selected_polygons = [self.polygons[i] for i in item] - return PolygonMasks(selected_polygons) - - def __iter__(self) -> Iterator[List[np.ndarray]]: - """ - Yields: - list[ndarray]: the polygons for one instance. - Each Tensor is a float64 vector representing a polygon. - """ - return iter(self.polygons) - - def __repr__(self) -> str: - s = self.__class__.__name__ + "(" - s += "num_instances={})".format(len(self.polygons)) - return s - - def __len__(self) -> int: - return len(self.polygons) - - def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: - """ - Crop each mask by the given box, and resize results to (mask_size, mask_size). - This can be used to prepare training targets for Mask R-CNN. - - Args: - boxes (Tensor): Nx4 tensor storing the boxes for each mask - mask_size (int): the size of the rasterized mask. - - Returns: - Tensor: A bool tensor of shape (N, mask_size, mask_size), where - N is the number of predicted boxes for this image. - """ - assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) - - device = boxes.device - # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise - # (several small tensors for representing a single instance mask) - boxes = boxes.to(torch.device("cpu")) - - results = [ - rasterize_polygons_within_box(poly, box.numpy(), mask_size) - for poly, box in zip(self.polygons, boxes) - ] - """ - poly: list[list[float]], the polygons for one instance - box: a tensor of shape (4,) - """ - if len(results) == 0: - return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) - return torch.stack(results, dim=0).to(device=device) - - def area(self): - """ - Computes area of the mask. - Only works with Polygons, using the shoelace formula: - https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates - - Returns: - Tensor: a vector, area for each instance - """ - - area = [] - for polygons_per_instance in self.polygons: - area_per_instance = 0 - for p in polygons_per_instance: - area_per_instance += polygon_area(p[0::2], p[1::2]) - area.append(area_per_instance) - - return torch.tensor(area) - - @staticmethod - def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": - """ - Concatenates a list of PolygonMasks into a single PolygonMasks - - Arguments: - polymasks_list (list[PolygonMasks]) - - Returns: - PolygonMasks: the concatenated PolygonMasks - """ - assert isinstance(polymasks_list, (list, tuple)) - assert len(polymasks_list) > 0 - assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) - - cat_polymasks = type(polymasks_list[0])( - list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) - ) - return cat_polymasks - - -class ROIMasks: - """ - Represent masks by N smaller masks defined in some ROIs. 
Once ROI boxes are given, - full-image bitmask can be obtained by "pasting" the mask on the region defined - by the corresponding ROI box. - """ - - def __init__(self, tensor: torch.Tensor): - """ - Args: - tensor: (N, M, M) mask tensor that defines the mask within each ROI. - """ - if tensor.dim() != 3: - raise ValueError("ROIMasks must take a masks of 3 dimension.") - self.tensor = tensor - - def to(self, device: torch.device) -> "ROIMasks": - return ROIMasks(self.tensor.to(device)) - - @property - def device(self) -> device: - return self.tensor.device - - def __len__(self): - return self.tensor.shape[0] - - def __getitem__(self, item) -> "ROIMasks": - """ - Returns: - ROIMasks: Create a new :class:`ROIMasks` by indexing. - - The following usage are allowed: - - 1. `new_masks = masks[2:10]`: return a slice of masks. - 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor - with `length = len(masks)`. Nonzero elements in the vector will be selected. - - Note that the returned object might share storage with this object, - subject to Pytorch's indexing semantics. - """ - t = self.tensor[item] - if t.dim() != 3: - raise ValueError( - f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" - ) - return ROIMasks(t) - - @torch.jit.unused - def __repr__(self) -> str: - s = self.__class__.__name__ + "(" - s += "num_instances={})".format(len(self.tensor)) - return s - - @torch.jit.unused - def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): - """ - Args: see documentation of :func:`paste_masks_in_image`. - """ - from annotator.oneformer.detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape - - if torch.jit.is_tracing(): - if isinstance(height, torch.Tensor): - paste_func = _paste_masks_tensor_shape - else: - paste_func = paste_masks_in_image - else: - paste_func = retry_if_cuda_oom(paste_masks_in_image) - bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) - return BitMasks(bitmasks) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/ann_head.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/ann_head.py deleted file mode 100644 index 30aaacc2cafc568d3de71d1477b4de0dc0fea9d3..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/ann_head.py +++ /dev/null @@ -1,245 +0,0 @@ -import torch -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule - -from ..builder import HEADS -from ..utils import SelfAttentionBlock as _SelfAttentionBlock -from .decode_head import BaseDecodeHead - - -class PPMConcat(nn.ModuleList): - """Pyramid Pooling Module that only concat the features of each layer. - - Args: - pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module. - """ - - def __init__(self, pool_scales=(1, 3, 6, 8)): - super(PPMConcat, self).__init__( - [nn.AdaptiveAvgPool2d(pool_scale) for pool_scale in pool_scales]) - - def forward(self, feats): - """Forward function.""" - ppm_outs = [] - for ppm in self: - ppm_out = ppm(feats) - ppm_outs.append(ppm_out.view(*feats.shape[:2], -1)) - concat_outs = torch.cat(ppm_outs, dim=2) - return concat_outs - - -class SelfAttentionBlock(_SelfAttentionBlock): - """Make a ANN used SelfAttentionBlock. - - Args: - low_in_channels (int): Input channels of lower level feature, - which is the key feature for self-attention. 
- high_in_channels (int): Input channels of higher level feature, - which is the query feature for self-attention. - channels (int): Output channels of key/query transform. - out_channels (int): Output channels. - share_key_query (bool): Whether share projection weight between key - and query projection. - query_scale (int): The scale of query feature map. - key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module of key feature. - conv_cfg (dict|None): Config of conv layers. - norm_cfg (dict|None): Config of norm layers. - act_cfg (dict|None): Config of activation layers. - """ - - def __init__(self, low_in_channels, high_in_channels, channels, - out_channels, share_key_query, query_scale, key_pool_scales, - conv_cfg, norm_cfg, act_cfg): - key_psp = PPMConcat(key_pool_scales) - if query_scale > 1: - query_downsample = nn.MaxPool2d(kernel_size=query_scale) - else: - query_downsample = None - super(SelfAttentionBlock, self).__init__( - key_in_channels=low_in_channels, - query_in_channels=high_in_channels, - channels=channels, - out_channels=out_channels, - share_key_query=share_key_query, - query_downsample=query_downsample, - key_downsample=key_psp, - key_query_num_convs=1, - key_query_norm=True, - value_out_num_convs=1, - value_out_norm=False, - matmul_norm=True, - with_out=True, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - -class AFNB(nn.Module): - """Asymmetric Fusion Non-local Block(AFNB) - - Args: - low_in_channels (int): Input channels of lower level feature, - which is the key feature for self-attention. - high_in_channels (int): Input channels of higher level feature, - which is the query feature for self-attention. - channels (int): Output channels of key/query transform. - out_channels (int): Output channels. - and query projection. - query_scales (tuple[int]): The scales of query feature map. - Default: (1,) - key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module of key feature. - conv_cfg (dict|None): Config of conv layers. - norm_cfg (dict|None): Config of norm layers. - act_cfg (dict|None): Config of activation layers. - """ - - def __init__(self, low_in_channels, high_in_channels, channels, - out_channels, query_scales, key_pool_scales, conv_cfg, - norm_cfg, act_cfg): - super(AFNB, self).__init__() - self.stages = nn.ModuleList() - for query_scale in query_scales: - self.stages.append( - SelfAttentionBlock( - low_in_channels=low_in_channels, - high_in_channels=high_in_channels, - channels=channels, - out_channels=out_channels, - share_key_query=False, - query_scale=query_scale, - key_pool_scales=key_pool_scales, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - self.bottleneck = ConvModule( - out_channels + high_in_channels, - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - def forward(self, low_feats, high_feats): - """Forward function.""" - priors = [stage(high_feats, low_feats) for stage in self.stages] - context = torch.stack(priors, dim=0).sum(dim=0) - output = self.bottleneck(torch.cat([context, high_feats], 1)) - return output - - -class APNB(nn.Module): - """Asymmetric Pyramid Non-local Block (APNB) - - Args: - in_channels (int): Input channels of key/query feature, - which is the key feature for self-attention. - channels (int): Output channels of key/query transform. - out_channels (int): Output channels. - query_scales (tuple[int]): The scales of query feature map. 
- Default: (1,) - key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module of key feature. - conv_cfg (dict|None): Config of conv layers. - norm_cfg (dict|None): Config of norm layers. - act_cfg (dict|None): Config of activation layers. - """ - - def __init__(self, in_channels, channels, out_channels, query_scales, - key_pool_scales, conv_cfg, norm_cfg, act_cfg): - super(APNB, self).__init__() - self.stages = nn.ModuleList() - for query_scale in query_scales: - self.stages.append( - SelfAttentionBlock( - low_in_channels=in_channels, - high_in_channels=in_channels, - channels=channels, - out_channels=out_channels, - share_key_query=True, - query_scale=query_scale, - key_pool_scales=key_pool_scales, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - self.bottleneck = ConvModule( - 2 * in_channels, - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - def forward(self, feats): - """Forward function.""" - priors = [stage(feats, feats) for stage in self.stages] - context = torch.stack(priors, dim=0).sum(dim=0) - output = self.bottleneck(torch.cat([context, feats], 1)) - return output - - -@HEADS.register_module() -class ANNHead(BaseDecodeHead): - """Asymmetric Non-local Neural Networks for Semantic Segmentation. - - This head is the implementation of `ANNNet - `_. - - Args: - project_channels (int): Projection channels for Nonlocal. - query_scales (tuple[int]): The scales of query feature map. - Default: (1,) - key_pool_scales (tuple[int]): The pooling scales of key feature map. - Default: (1, 3, 6, 8). - """ - - def __init__(self, - project_channels, - query_scales=(1, ), - key_pool_scales=(1, 3, 6, 8), - **kwargs): - super(ANNHead, self).__init__( - input_transform='multiple_select', **kwargs) - assert len(self.in_channels) == 2 - low_in_channels, high_in_channels = self.in_channels - self.project_channels = project_channels - self.fusion = AFNB( - low_in_channels=low_in_channels, - high_in_channels=high_in_channels, - out_channels=high_in_channels, - channels=project_channels, - query_scales=query_scales, - key_pool_scales=key_pool_scales, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.bottleneck = ConvModule( - high_in_channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.context = APNB( - in_channels=self.channels, - out_channels=self.channels, - channels=project_channels, - query_scales=query_scales, - key_pool_scales=key_pool_scales, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - low_feats, high_feats = self._transform_inputs(inputs) - output = self.fusion(low_feats, high_feats) - output = self.dropout(output) - output = self.bottleneck(output) - output = self.context(output) - output = self.cls_seg(output) - - return output diff --git a/spaces/THEFIG/AI-chatbot/README.md b/spaces/THEFIG/AI-chatbot/README.md deleted file mode 100644 index 7b220248d4c67b02335fa1d3fd07187420be7ad8..0000000000000000000000000000000000000000 --- a/spaces/THEFIG/AI-chatbot/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: AI Chatbot -emoji: 🌍 -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.36.1 -app_file: gradioapp.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - - diff --git 
a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/resolvelib/compat/collections_abc.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/resolvelib/compat/collections_abc.py deleted file mode 100644 index 1becc5093c5ab8e196bb9fee415e2381e7158fc3..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/resolvelib/compat/collections_abc.py +++ /dev/null @@ -1,6 +0,0 @@ -__all__ = ["Mapping", "Sequence"] - -try: - from collections.abc import Mapping, Sequence -except ImportError: - from collections import Mapping, Sequence diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/common/optim.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/common/optim.py deleted file mode 100644 index d39d3aaa546c17e831d21d1758b69e8c1609415e..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/common/optim.py +++ /dev/null @@ -1,15 +0,0 @@ -import torch - -from detectron2.config import LazyCall as L -from detectron2.solver.build import get_default_optimizer_params - -SGD = L(torch.optim.SGD)( - params=L(get_default_optimizer_params)( - # params.model is meant to be set to the model object, before instantiating - # the optimizer. - weight_decay_norm=0.0 - ), - lr=0.02, - momentum=0.9, - weight_decay=1e-4, -) diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/solver/build.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/solver/build.py deleted file mode 100644 index 1989dfcd0855d833a75e403f6a5e88725d78022f..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/solver/build.py +++ /dev/null @@ -1,285 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import itertools -import logging -from collections import defaultdict -from enum import Enum -from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union -import torch -from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler - -from detectron2.config import CfgNode - -from .lr_scheduler import LRMultiplier, WarmupParamScheduler - -_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]] -_GradientClipper = Callable[[_GradientClipperInput], None] - - -class GradientClipType(Enum): - VALUE = "value" - NORM = "norm" - - -def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper: - """ - Creates gradient clipping closure to clip by value or by norm, - according to the provided config. 
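-    The config passed in is the cfg.SOLVER.CLIP_GRADIENTS node and is expected to provide CLIP_TYPE ("value" or "norm"), CLIP_VALUE and, for norm clipping, NORM_TYPE.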
- """ - cfg = copy.deepcopy(cfg) - - def clip_grad_norm(p: _GradientClipperInput): - torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE) - - def clip_grad_value(p: _GradientClipperInput): - torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE) - - _GRADIENT_CLIP_TYPE_TO_CLIPPER = { - GradientClipType.VALUE: clip_grad_value, - GradientClipType.NORM: clip_grad_norm, - } - return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)] - - -def _generate_optimizer_class_with_gradient_clipping( - optimizer: Type[torch.optim.Optimizer], - *, - per_param_clipper: Optional[_GradientClipper] = None, - global_clipper: Optional[_GradientClipper] = None, -) -> Type[torch.optim.Optimizer]: - """ - Dynamically creates a new type that inherits the type of a given instance - and overrides the `step` method to add gradient clipping - """ - assert ( - per_param_clipper is None or global_clipper is None - ), "Not allowed to use both per-parameter clipping and global clipping" - - def optimizer_wgc_step(self, closure=None): - if per_param_clipper is not None: - for group in self.param_groups: - for p in group["params"]: - per_param_clipper(p) - else: - # global clipper for future use with detr - # (https://github.com/facebookresearch/detr/pull/287) - all_params = itertools.chain(*[g["params"] for g in self.param_groups]) - global_clipper(all_params) - super(type(self), self).step(closure) - - OptimizerWithGradientClip = type( - optimizer.__name__ + "WithGradientClip", - (optimizer,), - {"step": optimizer_wgc_step}, - ) - return OptimizerWithGradientClip - - -def maybe_add_gradient_clipping( - cfg: CfgNode, optimizer: Type[torch.optim.Optimizer] -) -> Type[torch.optim.Optimizer]: - """ - If gradient clipping is enabled through config options, wraps the existing - optimizer type to become a new dynamically created class OptimizerWithGradientClip - that inherits the given optimizer and overrides the `step` method to - include gradient clipping. - - Args: - cfg: CfgNode, configuration options - optimizer: type. A subclass of torch.optim.Optimizer - - Return: - type: either the input `optimizer` (if gradient clipping is disabled), or - a subclass of it with gradient clipping included in the `step` method. - """ - if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED: - return optimizer - if isinstance(optimizer, torch.optim.Optimizer): - optimizer_type = type(optimizer) - else: - assert issubclass(optimizer, torch.optim.Optimizer), optimizer - optimizer_type = optimizer - - grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS) - OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping( - optimizer_type, per_param_clipper=grad_clipper - ) - if isinstance(optimizer, torch.optim.Optimizer): - optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended - return optimizer - else: - return OptimizerWithGradientClip - - -def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: - """ - Build an optimizer from config. 
- """ - params = get_default_optimizer_params( - model, - base_lr=cfg.SOLVER.BASE_LR, - weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, - bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, - weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, - ) - return maybe_add_gradient_clipping(cfg, torch.optim.SGD)( - params, - lr=cfg.SOLVER.BASE_LR, - momentum=cfg.SOLVER.MOMENTUM, - nesterov=cfg.SOLVER.NESTEROV, - weight_decay=cfg.SOLVER.WEIGHT_DECAY, - ) - - -def get_default_optimizer_params( - model: torch.nn.Module, - base_lr: Optional[float] = None, - weight_decay: Optional[float] = None, - weight_decay_norm: Optional[float] = None, - bias_lr_factor: Optional[float] = 1.0, - weight_decay_bias: Optional[float] = None, - overrides: Optional[Dict[str, Dict[str, float]]] = None, -) -> List[Dict[str, Any]]: - """ - Get default param list for optimizer, with support for a few types of - overrides. If no overrides needed, this is equivalent to `model.parameters()`. - - Args: - base_lr: lr for every group by default. Can be omitted to use the one in optimizer. - weight_decay: weight decay for every group by default. Can be omitted to use the one - in optimizer. - weight_decay_norm: override weight decay for params in normalization layers - bias_lr_factor: multiplier of lr for bias parameters. - weight_decay_bias: override weight decay for bias parameters - overrides: if not `None`, provides values for optimizer hyperparameters - (LR, weight decay) for module parameters with a given name; e.g. - ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and - weight decay values for all module parameters named `embedding`. - - For common detection models, ``weight_decay_norm`` is the only option - needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings - from Detectron1 that are not found useful. - - Example: - :: - torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0), - lr=0.01, weight_decay=1e-4, momentum=0.9) - """ - if overrides is None: - overrides = {} - defaults = {} - if base_lr is not None: - defaults["lr"] = base_lr - if weight_decay is not None: - defaults["weight_decay"] = weight_decay - bias_overrides = {} - if bias_lr_factor is not None and bias_lr_factor != 1.0: - # NOTE: unlike Detectron v1, we now by default make bias hyperparameters - # exactly the same as regular weights. 
- if base_lr is None: - raise ValueError("bias_lr_factor requires base_lr") - bias_overrides["lr"] = base_lr * bias_lr_factor - if weight_decay_bias is not None: - bias_overrides["weight_decay"] = weight_decay_bias - if len(bias_overrides): - if "bias" in overrides: - raise ValueError("Conflicting overrides for 'bias'") - overrides["bias"] = bias_overrides - - norm_module_types = ( - torch.nn.BatchNorm1d, - torch.nn.BatchNorm2d, - torch.nn.BatchNorm3d, - torch.nn.SyncBatchNorm, - # NaiveSyncBatchNorm inherits from BatchNorm2d - torch.nn.GroupNorm, - torch.nn.InstanceNorm1d, - torch.nn.InstanceNorm2d, - torch.nn.InstanceNorm3d, - torch.nn.LayerNorm, - torch.nn.LocalResponseNorm, - ) - params: List[Dict[str, Any]] = [] - memo: Set[torch.nn.parameter.Parameter] = set() - for module in model.modules(): - for module_param_name, value in module.named_parameters(recurse=False): - if not value.requires_grad: - continue - # Avoid duplicating parameters - if value in memo: - continue - memo.add(value) - - hyperparams = copy.copy(defaults) - if isinstance(module, norm_module_types) and weight_decay_norm is not None: - hyperparams["weight_decay"] = weight_decay_norm - hyperparams.update(overrides.get(module_param_name, {})) - params.append({"params": [value], **hyperparams}) - return reduce_param_groups(params) - - -def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - # Transform parameter groups into per-parameter structure. - # Later items in `params` can overwrite parameters set in previous items. - ret = defaultdict(dict) - for item in params: - assert "params" in item - cur_params = {x: y for x, y in item.items() if x != "params"} - for param in item["params"]: - ret[param].update({"params": [param], **cur_params}) - return list(ret.values()) - - -def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - # Reorganize the parameter groups and merge duplicated groups. - # The number of parameter groups needs to be as small as possible in order - # to efficiently use the PyTorch multi-tensor optimizer. Therefore instead - # of using a parameter_group per single parameter, we reorganize the - # parameter groups and merge duplicated groups. This approach speeds - # up multi-tensor optimizer significantly. - params = _expand_param_groups(params) - groups = defaultdict(list) # re-group all parameter groups by their hyperparams - for item in params: - cur_params = tuple((x, y) for x, y in item.items() if x != "params") - groups[cur_params].extend(item["params"]) - ret = [] - for param_keys, param_values in groups.items(): - cur = {kv[0]: kv[1] for kv in param_keys} - cur["params"] = param_values - ret.append(cur) - return ret - - -def build_lr_scheduler( - cfg: CfgNode, optimizer: torch.optim.Optimizer -) -> torch.optim.lr_scheduler._LRScheduler: - """ - Build a LR scheduler from config. - """ - name = cfg.SOLVER.LR_SCHEDULER_NAME - - if name == "WarmupMultiStepLR": - steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER] - if len(steps) != len(cfg.SOLVER.STEPS): - logger = logging.getLogger(__name__) - logger.warning( - "SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. " - "These values will be ignored." 
- ) - sched = MultiStepParamScheduler( - values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)], - milestones=steps, - num_updates=cfg.SOLVER.MAX_ITER, - ) - elif name == "WarmupCosineLR": - sched = CosineParamScheduler(1, 0) - else: - raise ValueError("Unknown LR scheduler: {}".format(name)) - - sched = WarmupParamScheduler( - sched, - cfg.SOLVER.WARMUP_FACTOR, - min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0), - cfg.SOLVER.WARMUP_METHOD, - ) - return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER) diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_augmentation_impl.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_augmentation_impl.py deleted file mode 100644 index 5a69e178a5ac67f69c2eeab667b9c0740a862eee..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_augmentation_impl.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# Modified by Xingyi Zhou -""" -Implement many useful :class:`Augmentation`. -""" -import numpy as np -import sys -from fvcore.transforms.transform import ( - BlendTransform, - CropTransform, - HFlipTransform, - NoOpTransform, - Transform, - VFlipTransform, -) -from PIL import Image - -from detectron2.data.transforms.augmentation import Augmentation -from .custom_transform import EfficientDetResizeCropTransform - -__all__ = [ - "EfficientDetResizeCrop", -] - - -class EfficientDetResizeCrop(Augmentation): - """ - Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge. - If `max_size` is reached, then downscale so that the longer edge does not exceed max_size. - """ - - def __init__( - self, size, scale, interp=Image.BILINEAR - ): - """ - Args: - """ - super().__init__() - self.target_size = (size, size) - self.scale = scale - self.interp = interp - - def get_transform(self, img): - # Select a random scale factor. - scale_factor = np.random.uniform(*self.scale) - scaled_target_height = scale_factor * self.target_size[0] - scaled_target_width = scale_factor * self.target_size[1] - # Recompute the accurate scale_factor using rounded scaled image size. 
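-        # The single img_scale (min of the per-axis scales) preserves the aspect ratio and keeps the scaled image within the randomly scaled target; when it still exceeds the final target_size, the random offsets below pick the crop position, otherwise they remain zero.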
- width, height = img.shape[1], img.shape[0] - img_scale_y = scaled_target_height / height - img_scale_x = scaled_target_width / width - img_scale = min(img_scale_y, img_scale_x) - - # Select non-zero random offset (x, y) if scaled image is larger than target size - scaled_h = int(height * img_scale) - scaled_w = int(width * img_scale) - offset_y = scaled_h - self.target_size[0] - offset_x = scaled_w - self.target_size[1] - offset_y = int(max(0.0, float(offset_y)) * np.random.uniform(0, 1)) - offset_x = int(max(0.0, float(offset_x)) * np.random.uniform(0, 1)) - return EfficientDetResizeCropTransform( - scaled_h, scaled_w, offset_y, offset_x, img_scale, self.target_size, self.interp) diff --git a/spaces/Tuana/what-would-mother-say/utils/__init__.py b/spaces/Tuana/what-would-mother-say/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/VideoCrafter/VideoCrafter/lvdm/modules/networks/openaimodel3d.py b/spaces/VideoCrafter/VideoCrafter/lvdm/modules/networks/openaimodel3d.py deleted file mode 100644 index 328fac71761a69b461f33946d6a1aa08622ecd8f..0000000000000000000000000000000000000000 --- a/spaces/VideoCrafter/VideoCrafter/lvdm/modules/networks/openaimodel3d.py +++ /dev/null @@ -1,577 +0,0 @@ -from functools import partial -from abc import abstractmethod -import torch -import torch.nn as nn -from einops import rearrange -import torch.nn.functional as F -from lvdm.models.utils_diffusion import timestep_embedding -from lvdm.common import checkpoint -from lvdm.basics import ( - zero_module, - conv_nd, - linear, - avg_pool_nd, - normalization -) -from lvdm.modules.attention import SpatialTransformer, TemporalTransformer - - -class TimestepBlock(nn.Module): - """ - Any module where forward() takes timestep embeddings as a second argument. - """ - @abstractmethod - def forward(self, x, emb): - """ - Apply the module to `x` given `emb` timestep embeddings. - """ - - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb, context=None, batch_size=None): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb, batch_size) - elif isinstance(layer, SpatialTransformer): - x = layer(x, context) - elif isinstance(layer, TemporalTransformer): - x = rearrange(x, '(b f) c h w -> b c f h w', b=batch_size) - x = layer(x, context) - x = rearrange(x, 'b c f h w -> (b f) c h w') - else: - x = layer(x,) - return x - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - downsampling occurs in the inner-two dimensions. 
- """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - stride = 2 if dims != 3 else (1, 2, 2) - if use_conv: - self.op = conv_nd( - dims, self.channels, self.out_channels, 3, stride=stride, padding=padding - ) - else: - assert self.channels == self.out_channels - self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - upsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - if use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.dims == 3: - x = F.interpolate(x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode='nearest') - else: - x = F.interpolate(x, scale_factor=2, mode='nearest') - if self.use_conv: - x = self.conv(x) - return x - - -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - :param channels: the number of input channels. - :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. - :param out_channels: if specified, the number of out channels. - :param use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the - channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param up: if True, use this block for upsampling. - :param down: if True, use this block for downsampling. 
- """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - use_conv=False, - up=False, - down=False, - use_temporal_conv=False, - tempspatial_aware=False - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - self.use_temporal_conv = use_temporal_conv - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - nn.Linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module(nn.Conv2d(self.out_channels, self.out_channels, 3, padding=1)), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - if self.use_temporal_conv: - self.temopral_conv = TemporalConvBlock( - self.out_channels, - self.out_channels, - dropout=0.1, - spatial_aware=tempspatial_aware - ) - - def forward(self, x, emb, batch_size=None): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - :param x: an [N x C x ...] Tensor of features. - :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. 
- """ - input_tuple = (x, emb,) - if batch_size: - forward_batchsize = partial(self._forward, batch_size=batch_size) - return checkpoint(forward_batchsize, input_tuple, self.parameters(), self.use_checkpoint) - return checkpoint(self._forward, input_tuple, self.parameters(), self.use_checkpoint) - - def _forward(self, x, emb, batch_size=None,): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - if self.use_scale_shift_norm: - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = torch.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - h = self.skip_connection(x) + h - - if self.use_temporal_conv and batch_size: - h = rearrange(h, '(b t) c h w -> b c t h w', b=batch_size) - h = self.temopral_conv(h) - h = rearrange(h, 'b c t h w -> (b t) c h w') - return h - - -class TemporalConvBlock(nn.Module): - """ - Adapted from modelscope: https://github.com/modelscope/modelscope/blob/master/modelscope/models/multi_modal/video_synthesis/unet_sd.py - """ - - def __init__(self, in_channels, out_channels=None, dropout=0.0, spatial_aware=False): - super(TemporalConvBlock, self).__init__() - if out_channels is None: - out_channels = in_channels - self.in_channels = in_channels - self.out_channels = out_channels - kernel_shape = (3, 1, 1) if not spatial_aware else (3, 3, 3) - padding_shape = (1, 0, 0) if not spatial_aware else (1, 1, 1) - - # conv layers - self.conv1 = nn.Sequential( - nn.GroupNorm(32, in_channels), nn.SiLU(), - nn.Conv3d(in_channels, out_channels, kernel_shape, padding=padding_shape)) - self.conv2 = nn.Sequential( - nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout), - nn.Conv3d(out_channels, in_channels, kernel_shape, padding=padding_shape)) - self.conv3 = nn.Sequential( - nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout), - nn.Conv3d(out_channels, in_channels, (3, 1, 1), padding=(1, 0, 0))) - self.conv4 = nn.Sequential( - nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout), - nn.Conv3d(out_channels, in_channels, (3, 1, 1), padding=(1, 0, 0))) - - # zero out the last layer params,so the conv block is identity - nn.init.zeros_(self.conv4[-1].weight) - nn.init.zeros_(self.conv4[-1].bias) - - def forward(self, x): - identity = x - x = self.conv1(x) - x = self.conv2(x) - x = self.conv3(x) - x = self.conv4(x) - - return x + identity - - -class UNetModel(nn.Module): - """ - The full UNet model with attention and timestep embedding. - :param in_channels: in_channels in the input Tensor. - :param model_channels: base channel count for the model. - :param out_channels: channels in the output Tensor. - :param num_res_blocks: number of residual blocks per downsample. - :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. - :param dropout: the dropout probability. - :param channel_mult: channel multiplier for each level of the UNet. - :param conv_resample: if True, use learned convolutions for upsampling and - downsampling. - :param dims: determines if the signal is 1D, 2D, or 3D. 
- :param num_classes: if specified (as an int), then this model will be - class-conditional with `num_classes` classes. - :param use_checkpoint: use gradient checkpointing to reduce memory usage. - :param num_heads: the number of attention heads in each attention layer. - :param num_heads_channels: if specified, ignore num_heads and instead use - a fixed channel width per attention head. - :param num_heads_upsample: works with num_heads to set a different number - of heads for upsampling. Deprecated. - :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. - :param resblock_updown: use residual blocks for up/downsampling. - """ - - def __init__(self, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0.0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - context_dim=None, - use_scale_shift_norm=False, - resblock_updown=False, - num_heads=-1, - num_head_channels=-1, - transformer_depth=1, - use_linear=False, - use_checkpoint=False, - temporal_conv=False, - tempspatial_aware=False, - temporal_attention=True, - temporal_selfatt_only=True, - use_relative_position=True, - use_causal_attention=False, - temporal_length=None, - use_fp16=False, - addition_attention=False, - use_image_attention=False, - temporal_transformer_depth=1, - fps_cond=False, - ): - super(UNetModel, self).__init__() - if num_heads == -1: - assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' - if num_head_channels == -1: - assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' - - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.temporal_attention = temporal_attention - time_embed_dim = model_channels * 4 - self.use_checkpoint = use_checkpoint - self.dtype = torch.float16 if use_fp16 else torch.float32 - self.addition_attention=addition_attention - self.use_image_attention = use_image_attention - self.fps_cond=fps_cond - - - - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - if self.fps_cond: - self.fps_embedding = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential(conv_nd(dims, in_channels, model_channels, 3, padding=1)) - ] - ) - if self.addition_attention: - self.init_attn=TimestepEmbedSequential( - TemporalTransformer( - model_channels, - n_heads=8, - d_head=num_head_channels, - depth=transformer_depth, - context_dim=context_dim, - use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only, - causal_attention=use_causal_attention, relative_position=use_relative_position, - temporal_length=temporal_length)) - - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock(ch, time_embed_dim, dropout, - out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware, - use_temporal_conv=temporal_conv - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // 
num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - layers.append( - SpatialTransformer(ch, num_heads, dim_head, - depth=transformer_depth, context_dim=context_dim, use_linear=use_linear, - use_checkpoint=use_checkpoint, disable_self_attn=False, - img_cross_attention=self.use_image_attention - ) - ) - if self.temporal_attention: - layers.append( - TemporalTransformer(ch, num_heads, dim_head, - depth=temporal_transformer_depth, context_dim=context_dim, use_linear=use_linear, - use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only, - causal_attention=use_causal_attention, relative_position=use_relative_position, - temporal_length=temporal_length - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock(ch, time_embed_dim, dropout, - out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True - ) - if resblock_updown - else Downsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - layers = [ - ResBlock(ch, time_embed_dim, dropout, - dims=dims, use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware, - use_temporal_conv=temporal_conv - ), - SpatialTransformer(ch, num_heads, dim_head, - depth=transformer_depth, context_dim=context_dim, use_linear=use_linear, - use_checkpoint=use_checkpoint, disable_self_attn=False, - img_cross_attention=self.use_image_attention - ) - ] - if self.temporal_attention: - layers.append( - TemporalTransformer(ch, num_heads, dim_head, - depth=temporal_transformer_depth, context_dim=context_dim, use_linear=use_linear, - use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only, - causal_attention=use_causal_attention, relative_position=use_relative_position, - temporal_length=temporal_length - ) - ) - layers.append( - ResBlock(ch, time_embed_dim, dropout, - dims=dims, use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware, - use_temporal_conv=temporal_conv - ) - ) - self.middle_block = TimestepEmbedSequential(*layers) - - self.output_blocks = nn.ModuleList([]) - for level, mult in list(enumerate(channel_mult))[::-1]: - for i in range(num_res_blocks + 1): - ich = input_block_chans.pop() - layers = [ - ResBlock(ch + ich, time_embed_dim, dropout, - out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware, - use_temporal_conv=temporal_conv - ) - ] - ch = model_channels * mult - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - layers.append( - SpatialTransformer(ch, num_heads, dim_head, - depth=transformer_depth, context_dim=context_dim, use_linear=use_linear, - use_checkpoint=use_checkpoint, disable_self_attn=False, - img_cross_attention=self.use_image_attention - ) - ) - if self.temporal_attention: - layers.append( - TemporalTransformer(ch, num_heads, dim_head, - depth=temporal_transformer_depth, context_dim=context_dim, use_linear=use_linear, - 
use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only, - causal_attention=use_causal_attention, relative_position=use_relative_position, - temporal_length=temporal_length - ) - ) - if level and i == num_res_blocks: - out_ch = ch - layers.append( - ResBlock(ch, time_embed_dim, dropout, - out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True - ) - if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ds //= 2 - self.output_blocks.append(TimestepEmbedSequential(*layers)) - - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), - ) - - def forward(self, x, timesteps, context=None, features_adapter=None, fps=16, **kwargs): - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - - if self.fps_cond: - if type(fps) == int: - fps = torch.full_like(timesteps, fps) - fps_emb = timestep_embedding(fps,self.model_channels, repeat_only=False) - emb += self.fps_embedding(fps_emb) - - b,_,t,_,_ = x.shape - ## repeat t times for context [(b t) 77 768] & time embedding - context = context.repeat_interleave(repeats=t, dim=0) - emb = emb.repeat_interleave(repeats=t, dim=0) - - ## always in shape (b t) c h w, except for temporal layer - x = rearrange(x, 'b c t h w -> (b t) c h w') - - h = x.type(self.dtype) - adapter_idx = 0 - hs = [] - for id, module in enumerate(self.input_blocks): - h = module(h, emb, context=context, batch_size=b) - if id ==0 and self.addition_attention: - h = self.init_attn(h, emb, context=context, batch_size=b) - ## plug-in adapter features - if ((id+1)%3 == 0) and features_adapter is not None: - h = h + features_adapter[adapter_idx] - adapter_idx += 1 - hs.append(h) - if features_adapter is not None: - assert len(features_adapter)==adapter_idx, 'Wrong features_adapter' - - h = self.middle_block(h, emb, context=context, batch_size=b) - for module in self.output_blocks: - h = torch.cat([h, hs.pop()], dim=1) - h = module(h, emb, context=context, batch_size=b) - h = h.type(x.dtype) - y = self.out(h) - - # reshape back to (b c t h w) - y = rearrange(y, '(b t) c h w -> b c t h w', b=b) - return y - \ No newline at end of file diff --git a/spaces/Wootang01/keyword_extractor/app.py b/spaces/Wootang01/keyword_extractor/app.py deleted file mode 100644 index f5ae2df93c5a9a6571a981ad14912f2a6c6acc60..0000000000000000000000000000000000000000 --- a/spaces/Wootang01/keyword_extractor/app.py +++ /dev/null @@ -1,42 +0,0 @@ -import pandas as pd -import streamlit as st -from keybert import KeyBERT - -@st.cache(allow_output_mutation=True, suppress_st_warning=True, show_spinner=True) -def load_model(): - model = KeyBERT("sentence-transformers/xlm-r-distilroberta-base-paraphrase-v1") - return model - -model = load_model() - -placeholder = st.empty() -text_input = placeholder.text_area("Paste or write text", height=300) - -top_n = st.sidebar.slider("Select a number of keywords", 1, 10, 5, 1) -min_ngram = st.sidebar.number_input("Minimum number of words in each keyword", 1, 5, 1, 1) -max_ngram = st.sidebar.number_input("Maximum number of words in each keyword", min_ngram, 5, 3, step=1) -st.sidebar.code(f"ngram_range=({min_ngram}, {max_ngram})") - -params = {"docs": text_input, "top_n": top_n, "keyphrase_ngram_range": (min_ngram, max_ngram), "stop_words": 'english'} - -add_diversity = st.sidebar.checkbox("Adjust diversity of keywords") - -if add_diversity: - 
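-    # Max Sum Similarity and Maximal Marginal Relevance are KeyBERT's two built-in diversification strategies; the sidebar controls below map directly onto the nr_candidates / diversity arguments of extract_keywords.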
method = st.sidebar.selectbox("Select a method", ("Max Sum Similarity", "Maximal Marginal Relevance")) - if method == "Max Sum Similarity": - nr_candidates = st.sidebar.slider("nr_candidates", 20, 50, 20, 2) - params["use_maxsum"] = True - params["nr_candidates"] = nr_candidates - - elif method == "Maximal Marginal Relevance": - diversity = st.sidebar.slider("diversity", 0.1, 1.0, 0.6, 0.01) - params["use_mmr"] = True - params["diversity"] = diversity - -keywords = model.extract_keywords(**params) - -if keywords != []: - st.info("Extracted keywords") - keywords = pd.DataFrame(keywords, columns=["keyword", "relevance"]) - st.table(keywords) - diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/utils/deprecation_utils.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/utils/deprecation_utils.py deleted file mode 100644 index 6bdda664e102ea9913503b9e169fa97225d52c78..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/utils/deprecation_utils.py +++ /dev/null @@ -1,49 +0,0 @@ -import inspect -import warnings -from typing import Any, Dict, Optional, Union - -from packaging import version - - -def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True): - from .. import __version__ - - deprecated_kwargs = take_from - values = () - if not isinstance(args[0], tuple): - args = (args,) - - for attribute, version_name, message in args: - if version.parse(version.parse(__version__).base_version) >= version.parse(version_name): - raise ValueError( - f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" - f" version {__version__} is >= {version_name}" - ) - - warning = None - if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs: - values += (deprecated_kwargs.pop(attribute),) - warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." - elif hasattr(deprecated_kwargs, attribute): - values += (getattr(deprecated_kwargs, attribute),) - warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." - elif deprecated_kwargs is None: - warning = f"`{attribute}` is deprecated and will be removed in version {version_name}." 
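-        # Illustrative call site (not from this file): popping a deprecated keyword
-        # out of **kwargs, emitting the warning below, and returning its old value:
-        #     value = deprecate("old_arg", "0.20.0", "Use `new_arg` instead.", take_from=kwargs)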
- - if warning is not None: - warning = warning + " " if standard_warn else "" - warnings.warn(warning + message, FutureWarning, stacklevel=2) - - if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0: - call_frame = inspect.getouterframes(inspect.currentframe())[1] - filename = call_frame.filename - line_number = call_frame.lineno - function = call_frame.function - key, value = next(iter(deprecated_kwargs.items())) - raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`") - - if len(values) == 0: - return - elif len(values) == 1: - return values[0] - return values diff --git a/spaces/Yudha515/Rvc-Models/README.md b/spaces/Yudha515/Rvc-Models/README.md deleted file mode 100644 index e36f3c1f8803b85b58ec328405b0195fb7347829..0000000000000000000000000000000000000000 --- a/spaces/Yudha515/Rvc-Models/README.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: MusicGen -python_version: '3.9' -tags: -- music generation -- language models -- LLMs -app_file: app.py -emoji: 🎵 -colorFrom: white -colorTo: blue -sdk: gradio -sdk_version: 3.34.0 -pinned: true -license: cc-by-nc-4.0 -duplicated_from: facebook/MusicGen ---- -# Audiocraft -![docs badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_docs/badge.svg) -![linter badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_linter/badge.svg) -![tests badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_tests/badge.svg) - -Audiocraft is a PyTorch library for deep learning research on audio generation. At the moment, it contains the code for MusicGen, a state-of-the-art controllable text-to-music model. - -## MusicGen - -Audiocraft provides the code and models for MusicGen, [a simple and controllable model for music generation][arxiv]. MusicGen is a single stage auto-regressive -Transformer model trained over a 32kHz EnCodec tokenizer with 4 codebooks sampled at 50 Hz. Unlike existing methods like [MusicLM](https://arxiv.org/abs/2301.11325), MusicGen doesn't require a self-supervised semantic representation, and it generates -all 4 codebooks in one pass. By introducing a small delay between the codebooks, we show we can predict -them in parallel, thus having only 50 auto-regressive steps per second of audio. -Check out our [sample page][musicgen_samples] or test the available demo! - - - Open In Colab - - - Open in HugginFace - -
      - -We use 20K hours of licensed music to train MusicGen. Specifically, we rely on an internal dataset of 10K high-quality music tracks, and on the ShutterStock and Pond5 music data. - -## Installation -Audiocraft requires Python 3.9, PyTorch 2.0.0, and a GPU with at least 16 GB of memory (for the medium-sized model). To install Audiocraft, you can run the following: - -```shell -# Best to make sure you have torch installed first, in particular before installing xformers. -# Don't run this if you already have PyTorch installed. -pip install 'torch>=2.0' -# Then proceed to one of the following -pip install -U audiocraft # stable release -pip install -U git+https://git@github.com/facebookresearch/audiocraft#egg=audiocraft # bleeding edge -pip install -e . # or if you cloned the repo locally -``` - -## Usage -We offer a number of way to interact with MusicGen: -1. A demo is also available on the [`facebook/MusicGen` HuggingFace Space](https://huggingface.co/spaces/facebook/MusicGen) (huge thanks to all the HF team for their support). -2. You can run the Gradio demo in Colab: [colab notebook](https://colab.research.google.com/drive/1fxGqfg96RBUvGxZ1XXN07s3DthrKUl4-?usp=sharing). -3. You can use the gradio demo locally by running `python app.py`. -4. You can play with MusicGen by running the jupyter notebook at [`demo.ipynb`](./demo.ipynb) locally (if you have a GPU). -5. Finally, checkout [@camenduru Colab page](https://github.com/camenduru/MusicGen-colab) which is regularly - updated with contributions from @camenduru and the community. - -## API - -We provide a simple API and 4 pre-trained models. The pre trained models are: -- `small`: 300M model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-small) -- `medium`: 1.5B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-medium) -- `melody`: 1.5B model, text to music and text+melody to music - [🤗 Hub](https://huggingface.co/facebook/musicgen-melody) -- `large`: 3.3B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-large) - -We observe the best trade-off between quality and compute with the `medium` or `melody` model. -In order to use MusicGen locally **you must have a GPU**. We recommend 16GB of memory, but smaller -GPUs will be able to generate short sequences, or longer sequences with the `small` model. - -**Note**: Please make sure to have [ffmpeg](https://ffmpeg.org/download.html) installed when using newer version of `torchaudio`. -You can install it with: -``` -apt-get install ffmpeg -``` - -See after a quick example for using the API. - -```python -import torchaudio -from audiocraft.models import MusicGen -from audiocraft.data.audio import audio_write - -model = MusicGen.get_pretrained('melody') -model.set_generation_params(duration=8) # generate 8 seconds. -wav = model.generate_unconditional(4) # generates 4 unconditional audio samples -descriptions = ['happy rock', 'energetic EDM', 'sad jazz'] -wav = model.generate(descriptions) # generates 3 samples. - -melody, sr = torchaudio.load('./assets/bach.mp3') -# generates using the melody from the given audio and the provided descriptions. -wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr) - -for idx, one_wav in enumerate(wav): - # Will save under {idx}.wav, with loudness normalization at -14 db LUFS. - audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True) -``` - - -## Model Card - -See [the model card page](./MODEL_CARD.md). 
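Circling back to the API section above: it notes that smaller GPUs can still generate short sequences with the `small` checkpoint. Below is a minimal sketch of that path using only the calls already shown in the example above (`get_pretrained`, `set_generation_params`, `generate`, `audio_write`); the 5-second duration and the prompt strings are arbitrary choices for illustration, and actual memory headroom will depend on your hardware.

```python
# Minimal low-VRAM sketch, assuming the API shown in the example above:
# load the 300M `small` checkpoint and keep the generated clips short.
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

model = MusicGen.get_pretrained('small')  # text-to-music only, 300M parameters
model.set_generation_params(duration=5)   # short clips to reduce memory pressure

descriptions = ['lo-fi hip hop beat', 'acoustic folk guitar']
wav = model.generate(descriptions)        # one waveform per description

for idx, one_wav in enumerate(wav):
    # saved as small_{idx}.wav with loudness normalization, as in the example above
    audio_write(f'small_{idx}', one_wav.cpu(), model.sample_rate,
                strategy="loudness", loudness_compressor=True)
```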
- -## FAQ - -#### Will the training code be released? - -Yes. We will soon release the training code for MusicGen and EnCodec. - - -#### I need help on Windows - -@FurkanGozukara made a complete tutorial for [Audiocraft/MusicGen on Windows](https://youtu.be/v-YpvPkhdO4) - -#### I need help for running the demo on Colab - -Check [@camenduru tutorial on Youtube](https://www.youtube.com/watch?v=EGfxuTy9Eeo). - - -## Citation -``` -@article{copet2023simple, - title={Simple and Controllable Music Generation}, - author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez}, - year={2023}, - journal={arXiv preprint arXiv:2306.05284}, -} -``` - -## License -* The code in this repository is released under the MIT license as found in the [LICENSE file](LICENSE). -* The weights in this repository are released under the CC-BY-NC 4.0 license as found in the [LICENSE_weights file](LICENSE_weights). - -[arxiv]: https://arxiv.org/abs/2306.05284 -[musicgen_samples]: https://ai.honu.io/papers/musicgen/ diff --git a/spaces/Yuliang/ECON/lib/torch_utils/ops/upfirdn2d.h b/spaces/Yuliang/ECON/lib/torch_utils/ops/upfirdn2d.h deleted file mode 100644 index c9e2032bcac9d2abde7a75eea4d812da348afadd..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/torch_utils/ops/upfirdn2d.h +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include - -//------------------------------------------------------------------------ -// CUDA kernel parameters. - -struct upfirdn2d_kernel_params -{ - const void* x; - const float* f; - void* y; - - int2 up; - int2 down; - int2 pad0; - int flip; - float gain; - - int4 inSize; // [width, height, channel, batch] - int4 inStride; - int2 filterSize; // [width, height] - int2 filterStride; - int4 outSize; // [width, height, channel, batch] - int4 outStride; - int sizeMinor; - int sizeMajor; - - int loopMinor; - int loopMajor; - int loopX; - int launchMinor; - int launchMajor; -}; - -//------------------------------------------------------------------------ -// CUDA kernel specialization. - -struct upfirdn2d_kernel_spec -{ - void* kernel; - int tileOutW; - int tileOutH; - int loopMinor; - int loopX; -}; - -//------------------------------------------------------------------------ -// CUDA kernel selection. 
- -template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); - -//------------------------------------------------------------------------ diff --git a/spaces/Zannriell/hakurei-waifu-diffusion/app.py b/spaces/Zannriell/hakurei-waifu-diffusion/app.py deleted file mode 100644 index ccef706bf3035fe470bf6a4f5bd701b18bf59133..0000000000000000000000000000000000000000 --- a/spaces/Zannriell/hakurei-waifu-diffusion/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/hakurei/waifu-diffusion").launch() \ No newline at end of file diff --git a/spaces/abdvl/datahub_qa_bot/docs/advanced/backfilling.md b/spaces/abdvl/datahub_qa_bot/docs/advanced/backfilling.md deleted file mode 100644 index 91e2a31256c7ef22e4e3ab8bac0243792bc1d1b7..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/advanced/backfilling.md +++ /dev/null @@ -1,3 +0,0 @@ -# Backfilling Search Index & Graph DB - -WIP diff --git a/spaces/abhishek-kumar/ChatGPT4/README.md b/spaces/abhishek-kumar/ChatGPT4/README.md deleted file mode 100644 index 683432fd73f0df0790cb2225b881e5900c237d91..0000000000000000000000000000000000000000 --- a/spaces/abhishek-kumar/ChatGPT4/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ChatGPT4 -emoji: 👀 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/danet_r50-d8.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/danet_r50-d8.py deleted file mode 100644 index 2c934939fac48525f22ad86f489a041dd7db7d09..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/danet_r50-d8.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='DAHead', - in_channels=2048, - in_index=3, - channels=512, - pam_channels=64, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py deleted file mode 100644 index b37c79bed4ef9fd8913715e62dbe3fc5cafdc3aa..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import pickle - -from .base import BaseFileHandler - - -class PickleHandler(BaseFileHandler): - - str_like = False - - def load_from_fileobj(self, file, **kwargs): - return pickle.load(file, **kwargs) - - def load_from_path(self, filepath, **kwargs): - return super(PickleHandler, self).load_from_path( - filepath, mode='rb', **kwargs) - - def dump_to_str(self, obj, **kwargs): - kwargs.setdefault('protocol', 2) - return pickle.dumps(obj, **kwargs) - - def dump_to_fileobj(self, obj, file, **kwargs): - kwargs.setdefault('protocol', 2) - pickle.dump(obj, file, **kwargs) - - def dump_to_path(self, obj, filepath, **kwargs): - super(PickleHandler, self).dump_to_path( - obj, filepath, mode='wb', **kwargs) diff --git a/spaces/abidlabs/Draw/README.md b/spaces/abidlabs/Draw/README.md deleted file mode 100644 index 02bd5a05da58c8ba7f81964f66572b5f2f39a09f..0000000000000000000000000000000000000000 --- a/spaces/abidlabs/Draw/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Quickdraw -emoji: 💻 -colorFrom: blue -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/image/codecs/gdkpixbuf2.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/image/codecs/gdkpixbuf2.py deleted file mode 100644 index 8b88a2c24d063e544bf4858e1dab277a9bbbb4e0..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/image/codecs/gdkpixbuf2.py +++ /dev/null @@ -1,297 +0,0 @@ -from ctypes import * - -from pyglet.gl import * -from pyglet.image import * -from pyglet.image.codecs import * -from pyglet.image.codecs import gif - -import pyglet.lib -import pyglet.window - -gdk = pyglet.lib.load_library('gdk-x11-2.0') -gdkpixbuf = pyglet.lib.load_library('gdk_pixbuf-2.0') - -GdkPixbufLoader = c_void_p -GdkPixbuf = c_void_p -guchar = c_char -gdkpixbuf.gdk_pixbuf_loader_new.restype = POINTER(GdkPixbufLoader) -gdkpixbuf.gdk_pixbuf_loader_get_pixbuf.restype = POINTER(GdkPixbuf) -gdkpixbuf.gdk_pixbuf_get_pixels.restype = POINTER(guchar) -gdkpixbuf.gdk_pixbuf_loader_get_animation.restype = POINTER(c_void_p) -gdkpixbuf.gdk_pixbuf_animation_get_iter.restype = POINTER(c_void_p) -gdkpixbuf.gdk_pixbuf_animation_iter_get_pixbuf.restype = POINTER(GdkPixbuf) - - -class GTimeVal(Structure): - _fields_ = [ - ('tv_sec', c_long), - ('tv_usec', c_long) - ] - - -GQuark = c_uint32 -gint = c_int -gchar = c_char - - -class GError(Structure): - _fields_ = [ - ('domain', GQuark), - ('code', gint), - ('message', POINTER(gchar)) - ] - -gerror_ptr = POINTER(GError) - -def _gerror_to_string(error): - """ - Convert a GError to a string. 
- `error` should be a valid pointer to a GError struct. - """ - return 'GdkPixBuf Error: domain[{}], code[{}]: {}'.format(error.contents.domain, - error.contents.code, - error.contents.message) - - -class GdkPixBufLoader: - """ - Wrapper around GdkPixBufLoader object. - """ - def __init__(self, filename, file): - self.closed = False - self._file = file - self._filename = filename - self._loader = gdkpixbuf.gdk_pixbuf_loader_new() - if self._loader is None: - raise ImageDecodeException('Unable to instantiate gdk pixbuf loader') - self._load_file() - - def __del__(self): - if self._loader is not None: - if not self.closed: - self._cancel_load() - gdk.g_object_unref(self._loader) - - def _load_file(self): - self._file.seek(0) - data = self._file.read() - self.write(data) - - def _finish_load(self): - assert not self.closed - error = gerror_ptr() - all_data_passed = gdkpixbuf.gdk_pixbuf_loader_close(self._loader, byref(error)) - self.closed = True - if not all_data_passed: - raise ImageDecodeException(_gerror_to_string(error)) - - def _cancel_load(self): - assert not self.closed - gdkpixbuf.gdk_pixbuf_loader_close(self._loader, None) - self.closed = True - - def write(self, data): - assert not self.closed, 'Cannot write after closing loader' - error = gerror_ptr() - if not gdkpixbuf.gdk_pixbuf_loader_write(self._loader, data, len(data), byref(error)): - raise ImageDecodeException(_gerror_to_string(error)) - - def get_pixbuf(self): - self._finish_load() - pixbuf = gdkpixbuf.gdk_pixbuf_loader_get_pixbuf(self._loader) - if pixbuf is None: - raise ImageDecodeException('Failed to get pixbuf from loader') - return GdkPixBuf(self, pixbuf) - - def get_animation(self): - self._finish_load() - anim = gdkpixbuf.gdk_pixbuf_loader_get_animation(self._loader) - if anim is None: - raise ImageDecodeException('Failed to get animation from loader') - gif_delays = self._get_gif_delays() - return GdkPixBufAnimation(self, anim, gif_delays) - - def _get_gif_delays(self): - # GDK pixbuf animations will loop indefinitely if looping is enabled for the - # gif, so get number of frames and delays from gif metadata - assert self._file is not None - self._file.seek(0) - gif_stream = gif.read(self._file) - return [image.delay for image in gif_stream.images] - - -class GdkPixBuf: - """ - Wrapper around GdkPixBuf object. 
- """ - def __init__(self, loader, pixbuf): - # Keep reference to loader alive - self._loader = loader - self._pixbuf = pixbuf - gdk.g_object_ref(pixbuf) - - def __del__(self): - if self._pixbuf is not None: - gdk.g_object_unref(self._pixbuf) - - def load_next(self): - return self._pixbuf is not None - - @property - def width(self): - assert self._pixbuf is not None - return gdkpixbuf.gdk_pixbuf_get_width(self._pixbuf) - - @property - def height(self): - assert self._pixbuf is not None - return gdkpixbuf.gdk_pixbuf_get_height(self._pixbuf) - - @property - def channels(self): - assert self._pixbuf is not None - return gdkpixbuf.gdk_pixbuf_get_n_channels(self._pixbuf) - - @property - def rowstride(self): - assert self._pixbuf is not None - return gdkpixbuf.gdk_pixbuf_get_rowstride(self._pixbuf) - - @property - def has_alpha(self): - assert self._pixbuf is not None - return gdkpixbuf.gdk_pixbuf_get_has_alpha(self._pixbuf) == 1 - - def get_pixels(self): - pixels = gdkpixbuf.gdk_pixbuf_get_pixels(self._pixbuf) - assert pixels is not None - buf = (c_ubyte * (self.rowstride * self.height))() - memmove(buf, pixels, self.rowstride * (self.height - 1) + self.width * self.channels) - return buf - - def to_image(self): - if self.width < 1 or self.height < 1 or self.channels < 1 or self.rowstride < 1: - return None - - pixels = self.get_pixels() - - # Determine appropriate GL type - if self.channels == 3: - format = 'RGB' - else: - format = 'RGBA' - - return ImageData(self.width, self.height, format, pixels, -self.rowstride) - - -class GdkPixBufAnimation: - """ - Wrapper for a GdkPixBufIter for an animation. - """ - def __init__(self, loader, anim, gif_delays): - self._loader = loader - self._anim = anim - self._gif_delays = gif_delays - gdk.g_object_ref(anim) - - def __del__(self): - if self._anim is not None: - gdk.g_object_unref(self._anim) - - def __iter__(self): - time = GTimeVal(0, 0) - anim_iter = gdkpixbuf.gdk_pixbuf_animation_get_iter(self._anim, byref(time)) - return GdkPixBufAnimationIterator(self._loader, anim_iter, time, self._gif_delays) - - def to_animation(self): - return Animation(list(self)) - - -class GdkPixBufAnimationIterator: - def __init__(self, loader, anim_iter, start_time, gif_delays): - self._iter = anim_iter - self._first = True - self._time = start_time - self._loader = loader - self._gif_delays = gif_delays - self.delay_time = None - - def __del__(self): - if self._iter is not None: - gdk.g_object_unref(self._iter) - # The pixbuf returned by the iter is owned by the iter, so no need to destroy that one - - def __iter__(self): - return self - - def __next__(self): - self._advance() - frame = self.get_frame() - if frame is None: - raise StopIteration - return frame - - def _advance(self): - if not self._gif_delays: - raise StopIteration - self.delay_time = self._gif_delays.pop(0) - - if self._first: - self._first = False - else: - if self.gdk_delay_time == -1: - raise StopIteration - else: - gdk_delay = self.gdk_delay_time * 1000 # milliseconds to microseconds - us = self._time.tv_usec + gdk_delay - self._time.tv_sec += us // 1000000 - self._time.tv_usec = us % 1000000 - gdkpixbuf.gdk_pixbuf_animation_iter_advance(self._iter, byref(self._time)) - - def get_frame(self): - pixbuf = gdkpixbuf.gdk_pixbuf_animation_iter_get_pixbuf(self._iter) - if pixbuf is None: - return None - image = GdkPixBuf(self._loader, pixbuf).to_image() - return AnimationFrame(image, self.delay_time) - - @property - def gdk_delay_time(self): - assert self._iter is not None - return 
gdkpixbuf.gdk_pixbuf_animation_iter_get_delay_time(self._iter) - - -class GdkPixbuf2ImageDecoder(ImageDecoder): - def get_file_extensions(self): - return ['.png', '.xpm', '.jpg', '.jpeg', '.tif', '.tiff', '.pnm', - '.ras', '.bmp', '.gif'] - - def get_animation_file_extensions(self): - return ['.gif', '.ani'] - - def decode(self, filename, file): - if not file: - file = open(filename, 'rb') - loader = GdkPixBufLoader(filename, file) - return loader.get_pixbuf().to_image() - - def decode_animation(self, filename, file): - if not file: - file = open(filename, 'rb') - loader = GdkPixBufLoader(filename, file) - return loader.get_animation().to_animation() - - -def get_decoders(): - return [GdkPixbuf2ImageDecoder()] - - -def get_encoders(): - return [] - - -def init(): - gdk.g_type_init() - - -init() diff --git a/spaces/akhaliq/Mask2Former/mask2former/utils/__init__.py b/spaces/akhaliq/Mask2Former/mask2former/utils/__init__.py deleted file mode 100644 index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Mask2Former/mask2former/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/parallel/__init__.py b/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/parallel/__init__.py deleted file mode 100644 index 9b52f49cc0755562218a460483cbf02514ddd773..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/parallel/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .data_parallel import UserScatteredDataParallel, user_scattered_collate, async_copy_to diff --git a/spaces/akhaliq/neural-waveshaping-synthesis/neural_waveshaping_synthesis/models/modules/shaping.py b/spaces/akhaliq/neural-waveshaping-synthesis/neural_waveshaping_synthesis/models/modules/shaping.py deleted file mode 100644 index de04c9e25c029f40f8d976d1af0571e243eca00a..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/neural-waveshaping-synthesis/neural_waveshaping_synthesis/models/modules/shaping.py +++ /dev/null @@ -1,173 +0,0 @@ -import gin -import torch -import torch.fft -import torch.nn as nn -import torch.nn.functional as F - -from .dynamic import FiLM, TimeDistributedMLP - - -class Sine(nn.Module): - def forward(self, x: torch.Tensor): - return torch.sin(x) - - -@gin.configurable -class TrainableNonlinearity(nn.Module): - def __init__( - self, channels, width, nonlinearity=nn.ReLU, final_nonlinearity=Sine, depth=3 - ): - super().__init__() - self.input_scale = nn.Parameter(torch.randn(1, channels, 1) * 10) - layers = [] - for i in range(depth): - layers.append( - nn.Conv1d( - channels if i == 0 else channels * width, - channels * width if i < depth - 1 else channels, - 1, - groups=channels, - ) - ) - layers.append(nonlinearity() if i < depth - 1 else final_nonlinearity()) - - self.net = nn.Sequential(*layers) - - def forward(self, x): - return self.net(self.input_scale * x) - - -@gin.configurable -class NEWT(nn.Module): - def __init__( - self, - n_waveshapers: int, - control_embedding_size: int, - shaping_fn_size: int = 16, - out_channels: int = 1, - ): - super().__init__() - - self.n_waveshapers = n_waveshapers - - self.mlp = TimeDistributedMLP( - control_embedding_size, control_embedding_size, n_waveshapers * 4, depth=4 - ) - - self.waveshaping_index = FiLM() - self.shaping_fn = TrainableNonlinearity( - n_waveshapers, shaping_fn_size, nonlinearity=Sine - ) - self.normalising_coeff = FiLM() - - self.mixer = nn.Sequential( - 
nn.Conv1d(n_waveshapers, out_channels, 1), - ) - - def forward(self, exciter, control_embedding): - film_params = self.mlp(control_embedding) - film_params = F.upsample(film_params, exciter.shape[-1], mode="linear") - gamma_index, beta_index, gamma_norm, beta_norm = torch.split( - film_params, self.n_waveshapers, 1 - ) - - x = self.waveshaping_index(exciter, gamma_index, beta_index) - x = self.shaping_fn(x) - x = self.normalising_coeff(x, gamma_norm, beta_norm) - - # return x - return self.mixer(x) - - -class FastNEWT(NEWT): - def __init__( - self, - newt: NEWT, - table_size: int = 4096, - table_min: float = -3.0, - table_max: float = 3.0, - ): - super().__init__() - self.table_size = table_size - self.table_min = table_min - self.table_max = table_max - - self.n_waveshapers = newt.n_waveshapers - self.mlp = newt.mlp - - self.waveshaping_index = newt.waveshaping_index - self.normalising_coeff = newt.normalising_coeff - self.mixer = newt.mixer - - self.lookup_table = self._init_lookup_table( - newt, table_size, self.n_waveshapers, table_min, table_max - ) - self.to(next(iter(newt.parameters())).device) - - def _init_lookup_table( - self, - newt: NEWT, - table_size: int, - n_waveshapers: int, - table_min: float, - table_max: float, - ): - sample_values = torch.linspace(table_min, table_max, table_size, device=next(iter(newt.parameters())).device).expand( - 1, n_waveshapers, table_size - ) - lookup_table = newt.shaping_fn(sample_values)[0] - return nn.Parameter(lookup_table) - - def _lookup(self, idx): - return torch.stack( - [ - torch.stack( - [ - self.lookup_table[shaper, idx[batch, shaper]] - for shaper in range(idx.shape[1]) - ], - dim=0, - ) - for batch in range(idx.shape[0]) - ], - dim=0, - ) - - def shaping_fn(self, x): - idx = self.table_size * (x - self.table_min) / (self.table_max - self.table_min) - - lower = torch.floor(idx).long() - lower[lower < 0] = 0 - lower[lower >= self.table_size] = self.table_size - 1 - - upper = lower + 1 - upper[upper >= self.table_size] = self.table_size - 1 - - fract = idx - lower - lower_v = self._lookup(lower) - upper_v = self._lookup(upper) - - output = (upper_v - lower_v) * fract + lower_v - return output - - -@gin.configurable -class Reverb(nn.Module): - def __init__(self, length_in_seconds, sr): - super().__init__() - self.ir = nn.Parameter(torch.randn(1, sr * length_in_seconds - 1) * 1e-6) - self.register_buffer("initial_zero", torch.zeros(1, 1)) - - def forward(self, x): - ir_ = torch.cat((self.initial_zero, self.ir), dim=-1) - if x.shape[-1] > ir_.shape[-1]: - ir_ = F.pad(ir_, (0, x.shape[-1] - ir_.shape[-1])) - x_ = x - else: - x_ = F.pad(x, (0, ir_.shape[-1] - x.shape[-1])) - return ( - x - + torch.fft.irfft(torch.fft.rfft(x_) * torch.fft.rfft(ir_))[ - ..., : x.shape[-1] - ] - ) diff --git a/spaces/alex-mindspace/gpt-agents/swarmai/utils/task_queue/TaskQueueBase.py b/spaces/alex-mindspace/gpt-agents/swarmai/utils/task_queue/TaskQueueBase.py deleted file mode 100644 index 5d9e80945f4631a8923c71734c1a8519e39271e3..0000000000000000000000000000000000000000 --- a/spaces/alex-mindspace/gpt-agents/swarmai/utils/task_queue/TaskQueueBase.py +++ /dev/null @@ -1,58 +0,0 @@ -import threading -from abc import ABC, abstractmethod - -from swarmai.utils.task_queue.Task import Task -from swarmai.agents.AgentBase import AgentBase - -def synchronized_queue(method): - timeout_sec = 5 - def wrapper(self, *args, **kwargs): - with self.lock: - self.lock.acquire(timeout = timeout_sec) - try: - return method(self, *args, **kwargs) - except Exception as e: - 
print(f"Failed to execute {method.__name__}: {e}") - finally: - self.lock.release() - return wrapper - - -class TaskQueueBase(ABC): - """Abstract class for the Task Queue object. - We can have different implementation of the task queues: from simple queue, to the custom priority queue. - Not every implementatino is inherently thread safe, so we also put the locks here. - - Made a pull queue, just for the ease of implementation. - """ - def __init__(self): - self.lock = threading.Lock() - - @synchronized_queue - @abstractmethod - def add_task(self, taks: Task) -> bool: - """Adds a task to the queue. - """ - raise NotImplementedError - - @synchronized_queue - @abstractmethod - def get_task(self, agent: AgentBase) -> Task: - """Gets the next task from the queue. - """ - raise NotImplementedError - - @synchronized_queue - @abstractmethod - def complete_task(self, task_id: str): - """Sets the task as completed. - """ - raise NotImplementedError - - @synchronized_queue - @abstractmethod - def reset_task(self, task_id: str): - """Resets the task if the agent failed to complete it. - """ - raise NotImplementedError - diff --git a/spaces/allknowingroger/Image-Models-Test52/app.py b/spaces/allknowingroger/Image-Models-Test52/app.py deleted file mode 100644 index 75f847839f9aed9cbbeb6489e001c18b48182c40..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test52/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "RadonDong/LUSC_white_formalin", - "aff1/pichanabooth", - "Yntec/QToriReloaded", - "sbrandeis/stable-diffusion-laplagne-2", - "digiplay/LuckyStrikeMix1.05_Lovelylady", - "digiplay/fantasticmix_v30_test", - "oljike/nurtas_db_lora", - "LinoyTsaban/web_y2k_v1", - "juliajoanna/lora-trained-xl-fred-155", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # 
real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test70/README.md b/spaces/allknowingroger/Image-Models-Test70/README.md deleted file mode 100644 index dd6d153c1b4300376a1ad0030f2318f2369781df..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test70/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test69 ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/text-generation-webui-space-1/modules/ui.py b/spaces/allknowingroger/text-generation-webui-space-1/modules/ui.py deleted file mode 100644 index bb193e35c11b2a3d474ea89e7567206a3343395a..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/text-generation-webui-space-1/modules/ui.py +++ /dev/null @@ -1,92 +0,0 @@ -import gradio as gr - -refresh_symbol = '\U0001f504' # 🔄 - -css = """ -.tabs.svelte-710i53 { - margin-top: 0 -} -.py-6 { - padding-top: 2.5rem -} -.dark #refresh-button { - background-color: #ffffff1f; -} -#refresh-button { - flex: none; - margin: 0; - padding: 0; - min-width: 50px; - border: none; - box-shadow: none; - border-radius: 10px; - background-color: #0000000d; -} -#download-label, #upload-label { - min-height: 0 -} -#accordion { -} -.dark svg { - fill: white; -} -svg { - display: unset !important; - vertical-align: middle !important; - margin: 5px; -} -ol li p, ul li p { - display: inline-block; -} -""" - -chat_css = """ -.h-\[40vh\], .wrap.svelte-byatnx.svelte-byatnx.svelte-byatnx { - height: 66.67vh -} -.gradio-container { - max-width: 800px !important; - margin-left: auto !important; - margin-right: auto !important; -} -.w-screen { - width: unset -} -div.svelte-362y77>*, div.svelte-362y77>.form>* { - flex-wrap: nowrap -} -/* fixes the API documentation in chat mode */ 
-.api-docs.svelte-1iguv9h.svelte-1iguv9h.svelte-1iguv9h { - display: grid; -} -.pending.svelte-1ed2p3z { - opacity: 1; -} -""" - -class ToolButton(gr.Button, gr.components.FormComponent): - """Small button with single emoji as text, fits inside gradio forms""" - - def __init__(self, **kwargs): - super().__init__(variant="tool", **kwargs) - - def get_block_name(self): - return "button" - -def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id): - def refresh(): - refresh_method() - args = refreshed_args() if callable(refreshed_args) else refreshed_args - - for k, v in args.items(): - setattr(refresh_component, k, v) - - return gr.update(**(args or {})) - - refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id) - refresh_button.click( - fn=refresh, - inputs=[], - outputs=[refresh_component] - ) - return refresh_button diff --git a/spaces/aodianyun/stable-diffusion-webui/run.py b/spaces/aodianyun/stable-diffusion-webui/run.py deleted file mode 100644 index f7788fb90a3dfa7bcb50d0e8bab83f3a6eaa65fc..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/run.py +++ /dev/null @@ -1,6 +0,0 @@ -# import os - -# if not os.path.exists("extensions/deforum"): -# exec(open("deforum.sh").read()) - -#exec(open("run.sh").read()) diff --git a/spaces/arbml/Ashaar/poetry_diacritizer/models/baseline.py b/spaces/arbml/Ashaar/poetry_diacritizer/models/baseline.py deleted file mode 100644 index 1b1e2c6ccb2160e394ecde108020689d7cf30290..0000000000000000000000000000000000000000 --- a/spaces/arbml/Ashaar/poetry_diacritizer/models/baseline.py +++ /dev/null @@ -1,60 +0,0 @@ -from typing import List -from torch import nn -import torch - - -class BaseLineModel(nn.Module): - def __init__( - self, - inp_vocab_size: int, - targ_vocab_size: int, - embedding_dim: int = 512, - layers_units: List[int] = [256, 256, 256], - use_batch_norm: bool = False, - ): - super().__init__() - self.targ_vocab_size = targ_vocab_size - self.embedding = nn.Embedding(inp_vocab_size, embedding_dim) - - layers_units = [embedding_dim // 2] + layers_units - - layers = [] - - for i in range(1, len(layers_units)): - layers.append( - nn.LSTM( - layers_units[i - 1] * 2, - layers_units[i], - bidirectional=True, - batch_first=True, - ) - ) - if use_batch_norm: - layers.append(nn.BatchNorm1d(layers_units[i] * 2)) - - self.layers = nn.ModuleList(layers) - self.projections = nn.Linear(layers_units[-1] * 2, targ_vocab_size) - self.layers_units = layers_units - self.use_batch_norm = use_batch_norm - - def forward(self, src: torch.Tensor, lengths: torch.Tensor, target=None): - - outputs = self.embedding(src) - - # embedded_inputs = [batch_size, src_len, embedding_dim] - - for i, layer in enumerate(self.layers): - if isinstance(layer, nn.BatchNorm1d): - outputs = layer(outputs.permute(0, 2, 1)) - outputs = outputs.permute(0, 2, 1) - continue - if i > 0: - outputs, (hn, cn) = layer(outputs, (hn, cn)) - else: - outputs, (hn, cn) = layer(outputs) - - predictions = self.projections(outputs) - - output = {"diacritics": predictions} - - return output diff --git a/spaces/arbml/Ashaar/poetry_diacritizer/models/seq2seq.py b/spaces/arbml/Ashaar/poetry_diacritizer/models/seq2seq.py deleted file mode 100644 index 5fcf05c5cfb6087d90601b246a1235c47ded6903..0000000000000000000000000000000000000000 --- a/spaces/arbml/Ashaar/poetry_diacritizer/models/seq2seq.py +++ /dev/null @@ -1,277 +0,0 @@ -from typing import List -from typing import List, Optional - -import torch -from torch import nn -from 
torch.autograd import Variable - -from poetry_diacritizer.modules.attention import AttentionWrapper -from poetry_diacritizer.modules.layers import ConvNorm -from poetry_diacritizer.modules.tacotron_modules import CBHG, Prenet -from poetry_diacritizer.options import AttentionType -from poetry_diacritizer.util.utils import get_mask_from_lengths - - -class Seq2Seq(nn.Module): - def __init__(self, encoder: nn.Module, decoder: nn.Module): - super().__init__() - # Trying smaller std - self.encoder = encoder - self.decoder = decoder - - def forward( - self, - src: torch.Tensor, - lengths: torch.Tensor, - target: Optional[torch.Tensor] = None, - ): - - encoder_outputs = self.encoder(src, lengths) - mask = get_mask_from_lengths(encoder_outputs, lengths) - outputs, alignments = self.decoder(encoder_outputs, target, mask) - - output = {"diacritics": outputs, "attention": alignments} - - return output - - -class Encoder(nn.Module): - def __init__( - self, - inp_vocab_size: int, - embedding_dim: int = 512, - layers_units: List[int] = [256, 256, 256], - use_batch_norm: bool = False, - ): - super().__init__() - self.embedding = nn.Embedding(inp_vocab_size, embedding_dim) - - layers_units = [embedding_dim // 2] + layers_units - - layers = [] - - for i in range(1, len(layers_units)): - layers.append( - nn.LSTM( - layers_units[i - 1] * 2, - layers_units[i], - bidirectional=True, - batch_first=True, - ) - ) - if use_batch_norm: - layers.append(nn.BatchNorm1d(layers_units[i] * 2)) - - self.layers = nn.ModuleList(layers) - self.layers_units = layers_units - self.use_batch_norm = use_batch_norm - - def forward(self, inputs: torch.Tensor, inputs_lengths: torch.Tensor): - - outputs = self.embedding(inputs) - - # embedded_inputs = [batch_size, src_len, embedding_dim] - - for i, layer in enumerate(self.layers): - if isinstance(layer, nn.BatchNorm1d): - outputs = layer(outputs.permute(0, 2, 1)) - outputs = outputs.permute(0, 2, 1) - continue - if i > 0: - outputs, (hn, cn) = layer(outputs, (hn, cn)) - else: - outputs, (hn, cn) = layer(outputs) - - return outputs - -class Decoder(nn.Module): - """A seq2seq decoder that decode a diacritic at a time , - Args: - encoder_dim (int): the encoder output dim - decoder_units (int): the number of neurons for each decoder layer - decoder_layers (int): number of decoder layers - """ - - def __init__( - self, - trg_vocab_size: int, - start_symbol_id: int, - encoder_dim: int = 256, - embedding_dim: int = 256, - decoder_units: int = 256, - decoder_layers: int = 2, - attention_units: int = 256, - attention_type: AttentionType = AttentionType.LocationSensitive, - is_attention_accumulative: bool = False, - prenet_depth: List[int] = [256, 128], - use_prenet: bool = True, - teacher_forcing_probability: float = 0.0, - ): - super().__init__() - - self.output_dim: int = trg_vocab_size - self.start_symbol_id = start_symbol_id - self.attention_units = attention_units - self.decoder_units = decoder_units - self.encoder_dim = encoder_dim - self.use_prenet = use_prenet - self.teacher_forcing_probability = teacher_forcing_probability - self.is_attention_accumulative = is_attention_accumulative - self.embbeding = nn.Embedding(trg_vocab_size, embedding_dim, padding_idx=0) - attention_in = embedding_dim - if use_prenet: - self.prenet = Prenet(embedding_dim, prenet_depth) - attention_in = prenet_depth[-1] - - self.attention_layer = nn.GRUCell(encoder_dim + attention_in, attention_units) - self.attention_wrapper = AttentionWrapper(attention_type, attention_units) - self.keys_layer = 
nn.Linear(encoder_dim, attention_units, bias=False) - self.project_to_decoder_in = nn.Linear( - attention_units + encoder_dim, - decoder_units, - ) - - self.decoder_rnns = nn.ModuleList( - [nn.GRUCell(decoder_units, decoder_units) for _ in range(decoder_layers)] - ) - - self.diacritics_layer = nn.Linear(decoder_units, trg_vocab_size) - self.device = "cuda" - - def decode( - self, - diacritic: torch.Tensor, - ): - """ - Decode one time-step - Args: - diacritic (Tensor): (batch_size, 1) - Returns: - """ - - diacritic = self.embbeding(diacritic) - if self.use_prenet: - prenet_out = self.prenet(diacritic) - else: - prenet_out = diacritic - - cell_input = torch.cat((prenet_out, self.prev_attention), -1) - - self.attention_hidden = self.attention_layer(cell_input, self.attention_hidden) - output = self.attention_hidden - - # The queries are the hidden state of the RNN layer - attention, alignment = self.attention_wrapper( - query=self.attention_hidden, - values=self.encoder_outputs, - keys=self.keys, - mask=self.mask, - prev_alignment=self.prev_alignment, - ) - - decoder_input = torch.cat((output, attention), -1) - - decoder_input = self.project_to_decoder_in(decoder_input) - - for idx in range(len(self.decoder_rnns)): - self.decoder_hiddens[idx] = self.decoder_rnns[idx]( - decoder_input, self.decoder_hiddens[idx] - ) - decoder_input = self.decoder_hiddens[idx] + decoder_input - - output = decoder_input - - output = self.diacritics_layer(output) - - if self.is_attention_accumulative: - self.prev_alignment = self.prev_alignment + alignment - else: - self.prev_alignment = alignment - - self.prev_attention = attention - - return output, alignment - - def inference(self): - """Generate diacritics one at a time""" - batch_size = self.encoder_outputs.size(0) - trg_len = self.encoder_outputs.size(1) - diacritic = ( - torch.full((batch_size,), self.start_symbol_id).to(self.device).long() - ) - outputs, alignments = [], [] - self.initialize() - - for _ in range(trg_len): - output, alignment = self.decode(diacritic=diacritic) - - outputs.append(output) - alignments.append(alignment) - diacritic = torch.max(output, 1).indices - - alignments = torch.stack(alignments).transpose(0, 1) - outputs = torch.stack(outputs).transpose(0, 1).contiguous() - return outputs, alignments - - def forward( - self, - encoder_outputs: torch.Tensor, - diacritics: Optional[torch.Tensor] = None, - input_mask: Optional[torch.Tensor] = None, - ): - """calculate forward propagation - Args: - encoder_outputs (Tensor): the output of the encoder - (batch_size, Tx, encoder_units * 2) - diacritics(Tensor): target sequence - input_mask (Tensor): the inputs mask (batch_size, Tx) - """ - self.mask = input_mask - self.encoder_outputs = encoder_outputs - self.keys = self.keys_layer(encoder_outputs) - - if diacritics is None: - return self.inference() - - batch_size = diacritics.size(0) - trg_len = diacritics.size(1) - - # Init decoder states - outputs = [] - alignments = [] - - self.initialize() - - diacritic = ( - torch.full((batch_size,), self.start_symbol_id).to(self.device).long() - ) - - for time in range(trg_len): - output, alignment = self.decode(diacritic=diacritic) - outputs += [output] - alignments += [alignment] - #if random.random() > self.teacher_forcing_probability: - diacritic = diacritics[:, time] # use training input - #else: - #diacritic = torch.max(output, 1).indices # use last output - - alignments = torch.stack(alignments).transpose(0, 1) - outputs = torch.stack(outputs).transpose(0, 1).contiguous() - - return outputs, 
alignments - - def initialize(self): - """Initialize the first step variables""" - batch_size = self.encoder_outputs.size(0) - src_len = self.encoder_outputs.size(1) - self.attention_hidden = Variable( - torch.zeros(batch_size, self.attention_units) - ).to(self.device) - self.decoder_hiddens = [ - Variable(torch.zeros(batch_size, self.decoder_units)).to(self.device) - for _ in range(len(self.decoder_rnns)) - ] - self.prev_attention = Variable(torch.zeros(batch_size, self.encoder_dim)).to( - self.device - ) - self.prev_alignment = Variable(torch.zeros(batch_size, src_len)).to(self.device) diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/english/__init__.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/english/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ashercn97/AsherTesting/extensions/whisper_stt/readme.md b/spaces/ashercn97/AsherTesting/extensions/whisper_stt/readme.md deleted file mode 100644 index cd9abbf68cb4f7adf1172fdd57e9e26466e47778..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/extensions/whisper_stt/readme.md +++ /dev/null @@ -1,15 +0,0 @@ -# whisper_stt - -Allows you to enter your inputs in chat mode using your microphone. - -## Settings - -To adjust your default settings, you can add the following to your settings.yaml file. - -``` -whisper_stt-whipser_language: chinese -whisper_stt-whipser_model: tiny -whisper_stt-auto_submit: False -``` - -See source documentation for [model names](https://github.com/openai/whisper#available-models-and-languages) and (languages)[https://github.com/openai/whisper/blob/main/whisper/tokenizer.py] you can use. \ No newline at end of file diff --git a/spaces/awacke1/HEDIS.Roster.Dash.Component.Service/app.py b/spaces/awacke1/HEDIS.Roster.Dash.Component.Service/app.py deleted file mode 100644 index fdc56957b6ec799dd26894211577fe278076de43..0000000000000000000000000000000000000000 --- a/spaces/awacke1/HEDIS.Roster.Dash.Component.Service/app.py +++ /dev/null @@ -1,318 +0,0 @@ -import streamlit as st - -data = [ - { - "Condition": "Pain", - "Question": "Do you have any pain or discomfort?", - "CT Code": "SNOMED CT: 22253000", - "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=22253000&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" - }, - { - "Condition": "Depression", - "Question": "Have you been feeling down, depressed, or hopeless?", - "CT Code": "SNOMED CT: 35489007", - "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=35489007&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" - }, - { - "Condition": "Anxiety", - "Question": "Have you been feeling nervous, anxious, or on edge?", - "CT Code": "SNOMED CT: 197480006", - "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=197480006&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" - }, - { - "Condition": "Sleep problems", - "Question": "Have you been having trouble sleeping?", - "CT Code": "SNOMED CT: 309087008", - "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=309087008&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" - }, - { - "Condition": "Fatigue", - "Question": "Have you been feeling tired or worn out?", - "CT Code": "SNOMED CT: 84229001", - "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=84229001&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" - }, - { - "Condition": "Mobility problems", - 
"Question": "Do you have any difficulty walking or moving around?", - "CT Code": "SNOMED CT: 288939007", - "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=288939007&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" - }, - { - "Condition": "Incontinence", - "Question": "Do you have any problems with bladder or bowel control?", - "CT Code": "SNOMED CT: 48694002", - "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=48694002&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" - }, - { - "Condition": "Memory problems", - "Question": "Have you been having trouble remembering things?", - "CT Code": "SNOMED CT: 386807006", - "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=386807006&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Vision problems", -"Question": "Do you have any problems with your vision?", -"CT Code": "SNOMED CT: 246636008", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=246636008&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Hearing problems", -"Question": "Do you have any problems with your hearing?", -"CT Code": "SNOMED CT: 405729008", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=405729008&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Breathing problems", -"Question": "Have you been having trouble breathing?", -"CT Code": "SNOMED CT: 267036007", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=267036007&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Cognitive impairment", -"Question": "Have you been having difficulty thinking or making decisions?", -"CT Code": "SNOMED CT: 373930000", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=373930000&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Social isolation", -"Question": "Do you feel lonely or isolated from others?", -"CT Code": "SNOMED CT: 160303001", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=160303001&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Nutrition problems", -"Question": "Have you been having problems with your appetite or eating?", -"CT Code": "SNOMED CT: 248490000", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=248490000&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Substance use", -"Question": "Have you been using alcohol or drugs?", -"CT Code": "SNOMED CT: 228280008", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=228280008&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Safety concerns", -"Question": "Do you have any concerns about your safety or the safety of others?", -"CT Code": "SNOMED CT: 409596002", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=409596002&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Financial concerns", -"Question": "Do you have any concerns about your finances or ability to pay for care?", -"CT Code": "SNOMED CT: 721991003", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=721991003&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Transportation problems", -"Question": "Do you have any problems getting to appointments or running errands?", -"CT Code": "SNOMED CT: 405609001", -"URL": 
"https://browser.ihtsdotools.org/?perspective=full&conceptId1=405609001&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Family/caregiver stress", -"Question": "Do you feel overwhelmed or stressed by your family or caregiving responsibilities?", -"CT Code": "SNOMED CT: 308292007", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=308292007&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -}, -{ -"Condition": "Activities of daily living (ADLs)", -"Question": "Do you have any difficulty with bathing, dressing, grooming, or other basic activities?", -"CT Code": "SNOMED CT: 410518003", -"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=410518003&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en" -} -] - -st.write("Top 20 Conditions used in Surveys and Assessments as Questions about Care Needs:") -for item in data: - st.write(f"Condition: {item['Condition']}") # gpt fail - improper indent, continue problem - st.write(f"Question: {item['Question']}") - st.write(f"CT Code: {item['CT Code']}") - st.markdown(f"VerificationURL : {item['URL']}") - st.write() - st.write("---") - - -st.markdown(""" - -Prompt: -Using the 20 point outline below, write a streamlit program that displays a table grid of the top 20 FHIR based observations. For each come up with a representative emoji and reducce reading level to grade school level. Create a CT code which is one of the following types for each: ICD10, CPT, HCPCS, SNOMED, LOINC, OMS, RxNorm. Generate all code in streamlit python as a Hedis Pro Guide. - - -Patient Name (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient) -Patient Date of Birth (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-birthdate) -Patient Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-address) -Patient Phone Number (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone) -Patient Email Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-email) -Patient Gender (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient) -Encounter Date/Time (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter) -Encounter Type (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter) -Encounter Location (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter) -Provider Name (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner) -Provider NPI (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner) -Provider Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-address) -Provider Phone Number (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone) -Provider Email Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-email) -Reason for Visit (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab) -Lab Test Result (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab) -Medication Order (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-medicationrequest) -Medication Administration (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-medicationrequest) -Allergy List (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-allergyintolerance) -Immunization Record (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-immunization) - -""") - - - -st.markdown(""" - -# Prompt: -## According to https://www.healthit.gov/ 
Clinical Notes are aligned to different clinical terminology codes inccluding Applicable Vocabulary Standards to HEDIS. Name top twenty USCDI Data Classes and include the code URI, the code type, and the code value. Create a python dictionary list for streamlit python which includes the markdown for the data. use st.markdown function to display the data. - -Clinical Notes are documents created by healthcare professionals that contain information about a patient's medical history, current condition, and treatment plan. They can include progress notes, discharge summaries, and consultation reports. - -The United States Core Data for Interoperability (USCDI) is a standardized set of health data classes that are required to be exchanged between different healthcare providers and systems. The top twenty USCDI data classes are: - -Patient Name (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient) -Patient Date of Birth (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-birthdate) -Patient Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-address) -Patient Phone Number (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone) -Patient Email Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-email) -Patient Gender (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient) -Encounter Date/Time (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter) -Encounter Type (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter) -Encounter Location (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter) -Provider Name (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner) -Provider NPI (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner) -Provider Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-address) -Provider Phone Number (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone) -Provider Email Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-email) -Reason for Visit (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab) -Lab Test Result (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab) -Medication Order (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-medicationrequest) -Medication Administration (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-medicationrequest) -Allergy List (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-allergyintolerance) -Immunization Record (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-immunization) - -# Prompt 2: Fail: Show as markdown code in streamlit python - -# Prompt 3: Fail: continue and show full code listing including markdown so it displays as a numbered list in st.markdown() function - -# Prompt 4: Fail: - -import streamlit as st - -data = { - 'Patient Name': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Name'}, - 'Patient Date of Birth': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-birthdate', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Date of Birth'}, - 'Patient Address': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-address', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Address'}, - 'Patient Phone Number': {'URI': 
'http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Phone Number'}, - 'Patient Email Address': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-email', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Email Address'}, - 'Patient Gender': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Gender'}, - 'Encounter Date/Time': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter', 'Code Type': 'USCDI Data Class', 'Code Value': 'Encounter Date/Time'}, - 'Encounter Type': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter', 'Code Type': 'USCDI Data Class', 'Code Value': 'Encounter Type'}, - 'Encounter Location': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter', 'Code Type': 'USCDI Data Class', 'Code Value': 'Encounter Location'}, - 'Provider Name': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider Name'}, - 'Provider NPI': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider NPI'}, - 'Provider Address': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-address', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider Address'}, - 'Provider Phone Number': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider Phone Number'}, - 'Provider Email Address': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-email', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider Email Address'}, - 'Reason for Visit': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab', 'Code Type': 'USCDI Data Class', 'Code Value': 'Reason for Visit'}, - 'Lab Test Result': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core -... - -Failure reason - cannot continue. 3.5 could but in subsequent continue - Eval submitted - - -""") - -st.markdown(""" - -# Component UI Tile Sizes for Optimal Display - -Prompt on GPT-4: -``` -Display a list of top ten table size proportions dividing a typical computer screen into 2,3,4,5,6 tiles supporting the tiles in a table with each iframe sized so total width and height add to 2560 x 1440 for HD computer displays. - -# RLHF Feedback: -##👎GPT-4 Difference: Failed first time, submitted eval on fail. Tried again, different better outcome. - -Right Answer: - -Here is a list of top ten table size proportions for dividing a typical computer screen into 2, 3, 4, 5, and 6 tiles. Each iframe is sized so that the total width and height add up to 2560 x 1440 for HD computer displays: - -... -These table size proportions provide various arrangements for dividing a typical HD computer display into multiple tiles. You can choose the most suitable layout based on your requirements and preferences. - -Aaron Wacker -Show markdown code listing - -1. 2 tiles (1x2): - - Tile 1: 1280 x 1440 - - Tile 2: 1280 x 1440 - -2. 2 tiles (2x1): - - Tile 1: 2560 x 720 - - Tile 2: 2560 x 720 - -3. 3 tiles (1x3): - - Tile 1: 853 x 1440 - - Tile 2: 853 x 1440 - - Tile 3: 854 x 1440 - -4. 3 tiles (3x1): - - Tile 1: 2560 x 480 - - Tile 2: 2560 x 480 - - Tile 3: 2560 x 480 - -5. 
4 tiles (2x2): - - Tile 1: 1280 x 720 - - Tile 2: 1280 x 720 - - Tile 3: 1280 x 720 - - Tile 4: 1280 x 720 - -6. 5 tiles (1x5): - - Tile 1: 512 x 1440 - - Tile 2: 512 x 1440 - - Tile 3: 512 x 1440 - - Tile 4: 512 x 1440 - - Tile 5: 512 x 1440 - -7. 5 tiles (5x1): - - Tile 1: 2560 x 288 - - Tile 2: 2560 x 288 - - Tile 3: 2560 x 288 - - Tile 4: 2560 x 288 - - Tile 5: 2560 x 288 - -8. 6 tiles (2x3): - - Tile 1: 1280 x 480 - - Tile 2: 1280 x 480 - - Tile 3: 1280 x 480 - - Tile 4: 1280 x 480 - - Tile 5: 1280 x 480 - - Tile 6: 1280 x 480 - -9. 6 tiles (3x2): - - Tile 1: 853 x 720 - - Tile 2: 853 x 720 - - Tile 3: 854 x 720 - - Tile 4: 853 x 720 - - Tile 5: 853 x 720 - - Tile 6: 854 x 720 - -10. 6 tiles (1x6): - - Tile 1: 426 x 1440 - - Tile 2: 426 x 1440 - - Tile 3: 426 x 1440 - - Tile 4: 426 x 1440 - - Tile 5: 426 x 1440 - - Tile 6: 426 x 1440 - -#👍RLHF: -## Provide Additional Feedback : GPT-4 Got this right and did it differently second time after an eval about an error. Well done - very useful - - -``` - -""") \ No newline at end of file diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/cluster/kmeans.py b/spaces/azusarang/so-vits-svc-models-ba_P/cluster/kmeans.py deleted file mode 100644 index 6111ea45e66a15d41b5b904be6f75affd3c4369f..0000000000000000000000000000000000000000 --- a/spaces/azusarang/so-vits-svc-models-ba_P/cluster/kmeans.py +++ /dev/null @@ -1,201 +0,0 @@ -import math,pdb -import torch,pynvml -from torch.nn.functional import normalize -from time import time -import numpy as np -# device=torch.device("cuda:0") -def _kpp(data: torch.Tensor, k: int, sample_size: int = -1): - """ Picks k points in the data based on the kmeans++ method. - - Parameters - ---------- - data : torch.Tensor - Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D - data, rank 2 multidimensional data, in which case one - row is one observation. - k : int - Number of samples to generate. - sample_size : int - sample data to avoid memory overflow during calculation - - Returns - ------- - init : ndarray - A 'k' by 'N' containing the initial centroids. - - References - ---------- - .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of - careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium - on Discrete Algorithms, 2007. - .. 
[2] scipy/cluster/vq.py: _kpp - """ - batch_size=data.shape[0] - if batch_size>sample_size: - data = data[torch.randint(0, batch_size,[sample_size], device=data.device)] - dims = data.shape[1] if len(data.shape) > 1 else 1 - init = torch.zeros((k, dims)).to(data.device) - r = torch.distributions.uniform.Uniform(0, 1) - for i in range(k): - if i == 0: - init[i, :] = data[torch.randint(data.shape[0], [1])] - else: - D2 = torch.cdist(init[:i, :][None, :], data[None, :], p=2)[0].amin(dim=0) - probs = D2 / torch.sum(D2) - cumprobs = torch.cumsum(probs, dim=0) - init[i, :] = data[torch.searchsorted(cumprobs, r.sample([1]).to(data.device))] - return init -class KMeansGPU: - ''' - Kmeans clustering algorithm implemented with PyTorch - - Parameters: - n_clusters: int, - Number of clusters - - max_iter: int, default: 100 - Maximum number of iterations - - tol: float, default: 0.0001 - Tolerance - - verbose: int, default: 0 - Verbosity - - mode: {'euclidean', 'cosine'}, default: 'euclidean' - Type of distance measure - - init_method: {'random', 'point', '++'} - Type of initialization - - minibatch: {None, int}, default: None - Batch size of MinibatchKmeans algorithm - if None perform full KMeans algorithm - - Attributes: - centroids: torch.Tensor, shape: [n_clusters, n_features] - cluster centroids - ''' - def __init__(self, n_clusters, max_iter=200, tol=1e-4, verbose=0, mode="euclidean",device=torch.device("cuda:0")): - self.n_clusters = n_clusters - self.max_iter = max_iter - self.tol = tol - self.verbose = verbose - self.mode = mode - self.device=device - pynvml.nvmlInit() - gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(device.index) - info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle) - self.minibatch=int(33e6/self.n_clusters*info.free/ 1024 / 1024 / 1024) - print("free_mem/GB:",info.free/ 1024 / 1024 / 1024,"minibatch:",self.minibatch) - - @staticmethod - def cos_sim(a, b): - """ - Compute cosine similarity of 2 sets of vectors - - Parameters: - a: torch.Tensor, shape: [m, n_features] - - b: torch.Tensor, shape: [n, n_features] - """ - return normalize(a, dim=-1) @ normalize(b, dim=-1).transpose(-2, -1) - - @staticmethod - def euc_sim(a, b): - """ - Compute euclidean similarity of 2 sets of vectors - Parameters: - a: torch.Tensor, shape: [m, n_features] - b: torch.Tensor, shape: [n, n_features] - """ - return 2 * a @ b.transpose(-2, -1) -(a**2).sum(dim=1)[..., :, None] - (b**2).sum(dim=1)[..., None, :] - - def max_sim(self, a, b): - """ - Compute maximum similarity (or minimum distance) of each vector - in a with all of the vectors in b - Parameters: - a: torch.Tensor, shape: [m, n_features] - b: torch.Tensor, shape: [n, n_features] - """ - if self.mode == 'cosine': - sim_func = self.cos_sim - elif self.mode == 'euclidean': - sim_func = self.euc_sim - sim = sim_func(a, b) - max_sim_v, max_sim_i = sim.max(dim=-1) - return max_sim_v, max_sim_i - - def fit_predict(self, X): - """ - Combination of fit() and predict() methods. - This is faster than calling fit() and predict() seperately. 
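        The centroids are seeded with a k-means++ pass (_kpp) over a sub-sample of X and then
        refined with mini-batch iterations, whose batch size was estimated from free GPU memory
        in __init__; iteration stops early once the per-iteration error drops below tol.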
- Parameters: - X: torch.Tensor, shape: [n_samples, n_features] - centroids: {torch.Tensor, None}, default: None - if given, centroids will be initialized with given tensor - if None, centroids will be randomly chosen from X - Return: - labels: torch.Tensor, shape: [n_samples] - - mini_=33kk/k*remain - mini=min(mini_,fea_shape) - offset=log2(k/1000)*1.5 - kpp_all=min(mini_*10/offset,fea_shape) - kpp_sample=min(mini_/12/offset,fea_shape) - """ - assert isinstance(X, torch.Tensor), "input must be torch.Tensor" - assert X.dtype in [torch.half, torch.float, torch.double], "input must be floating point" - assert X.ndim == 2, "input must be a 2d tensor with shape: [n_samples, n_features] " - # print("verbose:%s"%self.verbose) - - offset = np.power(1.5,np.log(self.n_clusters / 1000))/np.log(2) - with torch.no_grad(): - batch_size= X.shape[0] - # print(self.minibatch, int(self.minibatch * 10 / offset), batch_size) - start_time = time() - if (self.minibatch*10//offset< batch_size): - x = X[torch.randint(0, batch_size,[int(self.minibatch*10/offset)])].to(self.device) - else: - x = X.to(self.device) - # print(x.device) - self.centroids = _kpp(x, self.n_clusters, min(int(self.minibatch/12/offset),batch_size)) - del x - torch.cuda.empty_cache() - # self.centroids = self.centroids.to(self.device) - num_points_in_clusters = torch.ones(self.n_clusters, device=self.device, dtype=X.dtype)#全1 - closest = None#[3098036]#int64 - if(self.minibatch>=batch_size//2 and self.minibatch=batch_size): - X=X.to(self.device) - for i in range(self.max_iter): - iter_time = time() - if self.minibatch= 2: - print('iter:', i, 'error:', error.item(), 'time spent:', round(time()-iter_time, 4)) - if error <= self.tol: - break - - if self.verbose >= 1: - print(f'used {i+1} iterations ({round(time()-start_time, 4)}s) to cluster {batch_size} items into {self.n_clusters} clusters') - return closest diff --git a/spaces/badayvedat/LLaVA/llava/eval/model_vqa_science.py b/spaces/badayvedat/LLaVA/llava/eval/model_vqa_science.py deleted file mode 100644 index aa77b39c0df7bcf0c8200f1282b165dee493ad73..0000000000000000000000000000000000000000 --- a/spaces/badayvedat/LLaVA/llava/eval/model_vqa_science.py +++ /dev/null @@ -1,141 +0,0 @@ -import argparse -import torch -import os -import json -from tqdm import tqdm -import shortuuid - -from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN -from llava.conversation import conv_templates, SeparatorStyle -from llava.model.builder import load_pretrained_model -from llava.utils import disable_torch_init -from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria - -from PIL import Image -import math - - -def split_list(lst, n): - """Split a list into n (roughly) equal-sized chunks""" - chunk_size = math.ceil(len(lst) / n) # integer division - return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] - - -def get_chunk(lst, n, k): - chunks = split_list(lst, n) - return chunks[k] - - -def eval_model(args): - # Model - disable_torch_init() - model_path = os.path.expanduser(args.model_path) - model_name = get_model_name_from_path(model_path) - tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name) - - questions = json.load(open(os.path.expanduser(args.question_file), "r")) - questions = get_chunk(questions, args.num_chunks, args.chunk_idx) - answers_file = os.path.expanduser(args.answers_file) - os.makedirs(os.path.dirname(answers_file), 
exist_ok=True) - ans_file = open(answers_file, "w") - for i, line in enumerate(tqdm(questions)): - idx = line["id"] - question = line['conversations'][0] - qs = question['value'].replace('', '').strip() - cur_prompt = qs - - if 'image' in line: - image_file = line["image"] - image = Image.open(os.path.join(args.image_folder, image_file)) - image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] - images = image_tensor.unsqueeze(0).half().cuda() - if getattr(model.config, 'mm_use_im_start_end', False): - qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs - else: - qs = DEFAULT_IMAGE_TOKEN + '\n' + qs - cur_prompt = '' + '\n' + cur_prompt - else: - images = None - - conv = conv_templates[args.conv_mode].copy() - conv.append_message(conv.roles[0], qs) - conv.append_message(conv.roles[1], None) - prompt = conv.get_prompt() - - input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() - - stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 - keywords = [stop_str] - stopping_criteria = [KeywordsStoppingCriteria(keywords, tokenizer, input_ids)] if conv.version == "v0" else None - - with torch.inference_mode(): - output_ids = model.generate( - input_ids, - images=images, - do_sample=True, - temperature=0.2, - max_new_tokens=1024, - use_cache=True, - stopping_criteria=stopping_criteria, - ) - - input_token_len = input_ids.shape[1] - n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() - if n_diff_input_output > 0: - print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') - outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] - outputs = outputs.strip() - if outputs.endswith(stop_str): - outputs = outputs[:-len(stop_str)] - outputs = outputs.strip() - - # prompt for answer - if args.answer_prompter: - outputs_reasoning = outputs - input_ids = tokenizer_image_token(prompt + outputs_reasoning + ' ###\nANSWER:', tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() - - with torch.inference_mode(): - output_ids = model.generate( - input_ids, - images=images, - do_sample=True, - temperature=0.2, - max_new_tokens=64, - use_cache=True, - stopping_criteria=[stopping_criteria]) - - input_token_len = input_ids.shape[1] - n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() - if n_diff_input_output > 0: - print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') - outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] - outputs = outputs.strip() - if outputs.endswith(stop_str): - outputs = outputs[:-len(stop_str)] - outputs = outputs.strip() - outputs = outputs_reasoning + '\n The answer is ' + outputs - - ans_id = shortuuid.uuid() - ans_file.write(json.dumps({"question_id": idx, - "prompt": cur_prompt, - "text": outputs, - "answer_id": ans_id, - "model_id": model_name, - "metadata": {}}) + "\n") - ans_file.flush() - ans_file.close() - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--model-path", type=str, default="facebook/opt-350m") - parser.add_argument("--model-base", type=str, default=None) - parser.add_argument("--image-folder", type=str, default="") - parser.add_argument("--question-file", type=str, default="tables/question.json") - parser.add_argument("--answers-file", type=str, default="answer.jsonl") - 
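    # The remaining flags select the conversation template and control dataset sharding:
    # --num-chunks / --chunk-idx feed split_list() / get_chunk() above, so several workers
    # can each evaluate one slice of the question file in parallel.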
parser.add_argument("--conv-mode", type=str, default="llava_v0") - parser.add_argument("--num-chunks", type=int, default=1) - parser.add_argument("--chunk-idx", type=int, default=0) - parser.add_argument("--answer-prompter", action="store_true") - args = parser.parse_args() - - eval_model(args) diff --git a/spaces/banana-projects/web3d/node_modules/three/src/core/DirectGeometry.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/core/DirectGeometry.d.ts deleted file mode 100644 index f1651790695f4a64cec9c3b62e1485715f6fd0f5..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/core/DirectGeometry.d.ts +++ /dev/null @@ -1,50 +0,0 @@ -import { Vector3 } from './../math/Vector3'; -import { Color } from './../math/Color'; -import { Vector2 } from './../math/Vector2'; -import { Vector4 } from './../math/Vector4'; -import { Box3 } from './../math/Box3'; -import { Sphere } from './../math/Sphere'; -import { Geometry } from './Geometry'; -import { Event } from './Face3'; -import { EventDispatcher } from './EventDispatcher'; -import { MorphTarget } from './Geometry'; -/** - * @see src/core/DirectGeometry.js - */ -export class DirectGeometry extends EventDispatcher { - constructor(); - - id: number; - uuid: string; - name: string; - type: string; - indices: number[]; - vertices: Vector3[]; - normals: Vector3[]; - colors: Color[]; - uvs: Vector2[]; - uvs2: Vector2[]; - groups: { start: number; materialIndex: number }[]; - morphTargets: MorphTarget[]; - skinWeights: Vector4[]; - skinIndices: Vector4[]; - boundingBox: Box3; - boundingSphere: Sphere; - verticesNeedUpdate: boolean; - normalsNeedUpdate: boolean; - colorsNeedUpdate: boolean; - uvsNeedUpdate: boolean; - groupsNeedUpdate: boolean; - - computeBoundingBox(): void; - computeBoundingSphere(): void; - computeGroups(geometry: Geometry): void; - fromGeometry(geometry: Geometry): DirectGeometry; - dispose(): void; - - // EventDispatcher mixins - addEventListener(type: string, listener: (event: Event) => void): void; - hasEventListener(type: string, listener: (event: Event) => void): boolean; - removeEventListener(type: string, listener: (event: Event) => void): void; - dispatchEvent(event: { type: string; [attachment: string]: any }): void; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/loaders/BufferGeometryLoader.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/loaders/BufferGeometryLoader.d.ts deleted file mode 100644 index fe3148f4ddf88a2cede3a12db5a9454a465ef865..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/loaders/BufferGeometryLoader.d.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { LoadingManager } from './LoadingManager'; -import { BufferGeometry } from './../core/BufferGeometry'; - -export class BufferGeometryLoader { - constructor(manager?: LoadingManager); - - manager: LoadingManager; - - load( - url: string, - onLoad: (bufferGeometry: BufferGeometry) => void, - onProgress?: (event: any) => void, - onError?: (event: any) => void - ): void; - parse(json: any): BufferGeometry; -} diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/__init__.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/__init__.py deleted file mode 100644 index cfb1e4d7bb221c429082bd389d9140e5b1cc07b0..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -import importlib -from copy import deepcopy -from os import path as osp - 
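# Registry-based construction: every '*_arch.py' module in this folder is imported below so
# that its @ARCH_REGISTRY.register() decorators run, and build_network() can then instantiate
# any registered architecture from an options dict whose 'type' key names the class, e.g.
# build_network({'type': 'RRDBNet', 'num_in_ch': 3, 'num_out_ch': 3}) (illustrative keys only).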
-from basicsr.utils import get_root_logger, scandir -from basicsr.utils.registry import ARCH_REGISTRY - -__all__ = ['build_network'] - -# automatically scan and import arch modules for registry -# scan all the files under the 'archs' folder and collect files ending with -# '_arch.py' -arch_folder = osp.dirname(osp.abspath(__file__)) -arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] -# import all the arch modules -_arch_modules = [importlib.import_module(f'basicsr.archs.{file_name}') for file_name in arch_filenames] - - -def build_network(opt): - opt = deepcopy(opt) - network_type = opt.pop('type') - net = ARCH_REGISTRY.get(network_type)(**opt) - logger = get_root_logger() - logger.info(f'Network [{net.__class__.__name__}] is created.') - return net diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/gfpgan/weights/README.md b/spaces/beihai/GFPGAN-V1.3-whole-image/gfpgan/weights/README.md deleted file mode 100644 index 4d7b7e642591ef88575d9e6c360a4d29e0cc1a4f..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/gfpgan/weights/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Weights - -Put the downloaded weights to this folder. diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/inference_gfpgan.py b/spaces/beihai/GFPGAN-V1.3-whole-image/inference_gfpgan.py deleted file mode 100644 index ac814a50ead170ab69f32c1714bd445c6c5baf17..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/inference_gfpgan.py +++ /dev/null @@ -1,116 +0,0 @@ -import argparse -import cv2 -import glob -import numpy as np -import os -import torch -from basicsr.utils import imwrite - -from gfpgan import GFPGANer - - -def main(): - """Inference demo for GFPGAN. - """ - parser = argparse.ArgumentParser() - parser.add_argument('--upscale', type=int, default=2, help='The final upsampling scale of the image') - parser.add_argument('--arch', type=str, default='clean', help='The GFPGAN architecture. Option: clean | original') - parser.add_argument('--channel', type=int, default=2, help='Channel multiplier for large networks of StyleGAN2') - parser.add_argument('--model_path', type=str, default='experiments/pretrained_models/GFPGANv1.3.pth') - parser.add_argument('--bg_upsampler', type=str, default='realesrgan', help='background upsampler') - parser.add_argument( - '--bg_tile', type=int, default=400, help='Tile size for background sampler, 0 for no tile during testing') - parser.add_argument('--test_path', type=str, default='inputs/whole_imgs', help='Input folder') - parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces') - parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face') - parser.add_argument('--aligned', action='store_true', help='Input are aligned faces') - parser.add_argument('--paste_back', action='store_false', help='Paste the restored faces back to images') - parser.add_argument('--save_root', type=str, default='results', help='Path to save root') - parser.add_argument( - '--ext', - type=str, - default='auto', - help='Image extension. 
Options: auto | jpg | png, auto means using the same extension as inputs') - args = parser.parse_args() - - args = parser.parse_args() - if args.test_path.endswith('/'): - args.test_path = args.test_path[:-1] - os.makedirs(args.save_root, exist_ok=True) - - # background upsampler - if args.bg_upsampler == 'realesrgan': - if not torch.cuda.is_available(): # CPU - import warnings - warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. ' - 'If you really want to use it, please modify the corresponding codes.') - bg_upsampler = None - else: - from basicsr.archs.rrdbnet_arch import RRDBNet - from realesrgan import RealESRGANer - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) - bg_upsampler = RealESRGANer( - scale=2, - model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth', - model=model, - tile=args.bg_tile, - tile_pad=10, - pre_pad=0, - half=True) # need to set False in CPU mode - else: - bg_upsampler = None - # set up GFPGAN restorer - restorer = GFPGANer( - model_path=args.model_path, - upscale=args.upscale, - arch=args.arch, - channel_multiplier=args.channel, - bg_upsampler=bg_upsampler) - - img_list = sorted(glob.glob(os.path.join(args.test_path, '*'))) - for img_path in img_list: - # read image - img_name = os.path.basename(img_path) - print(f'Processing {img_name} ...') - basename, ext = os.path.splitext(img_name) - input_img = cv2.imread(img_path, cv2.IMREAD_COLOR) - - # restore faces and background if necessary - cropped_faces, restored_faces, restored_img = restorer.enhance( - input_img, has_aligned=args.aligned, only_center_face=args.only_center_face, paste_back=args.paste_back) - - # save faces - for idx, (cropped_face, restored_face) in enumerate(zip(cropped_faces, restored_faces)): - # save cropped face - save_crop_path = os.path.join(args.save_root, 'cropped_faces', f'{basename}_{idx:02d}.png') - imwrite(cropped_face, save_crop_path) - # save restored face - if args.suffix is not None: - save_face_name = f'{basename}_{idx:02d}_{args.suffix}.png' - else: - save_face_name = f'{basename}_{idx:02d}.png' - save_restore_path = os.path.join(args.save_root, 'restored_faces', save_face_name) - imwrite(restored_face, save_restore_path) - # save comparison image - cmp_img = np.concatenate((cropped_face, restored_face), axis=1) - imwrite(cmp_img, os.path.join(args.save_root, 'cmp', f'{basename}_{idx:02d}.png')) - - # save restored img - if restored_img is not None: - if args.ext == 'auto': - extension = ext[1:] - else: - extension = args.ext - - if args.suffix is not None: - save_restore_path = os.path.join(args.save_root, 'restored_imgs', - f'{basename}_{args.suffix}.{extension}') - else: - save_restore_path = os.path.join(args.save_root, 'restored_imgs', f'{basename}.{extension}') - imwrite(restored_img, save_restore_path) - - print(f'Results are in the [{args.save_root}] folder.') - - -if __name__ == '__main__': - main() diff --git a/spaces/bigjoker/stable-diffusion-webui/scripts/postprocessing_upscale.py b/spaces/bigjoker/stable-diffusion-webui/scripts/postprocessing_upscale.py deleted file mode 100644 index ccec72fcbc72eeffbe24a659bf53ecba71162391..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/scripts/postprocessing_upscale.py +++ /dev/null @@ -1,131 +0,0 @@ -from PIL import Image -import numpy as np - -from modules import scripts_postprocessing, shared -import gradio as gr - -from modules.ui_components import FormRow - - 
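# Module-level result cache: upscale() keys each entry on (image hash, upscaler name, mode,
# scale factor, target size, crop flag) and reuses the stored result when the same settings are
# applied to the same image again; image_changed() clears it, and its size is capped by
# shared.opts.upscaling_max_images_in_cache.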
-upscale_cache = {} - - -class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): - name = "Upscale" - order = 1000 - - def ui(self): - selected_tab = gr.State(value=0) - - with gr.Tabs(elem_id="extras_resize_mode"): - with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by: - upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") - - with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: - with FormRow(): - upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w") - upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h") - upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") - - with FormRow(): - extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) - - with FormRow(): - extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) - extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") - - tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) - tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) - - return { - "upscale_mode": selected_tab, - "upscale_by": upscaling_resize, - "upscale_to_width": upscaling_resize_w, - "upscale_to_height": upscaling_resize_h, - "upscale_crop": upscaling_crop, - "upscaler_1_name": extras_upscaler_1, - "upscaler_2_name": extras_upscaler_2, - "upscaler_2_visibility": extras_upscaler_2_visibility, - } - - def upscale(self, image, info, upscaler, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop): - if upscale_mode == 1: - upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height) - info["Postprocess upscale to"] = f"{upscale_to_width}x{upscale_to_height}" - else: - info["Postprocess upscale by"] = upscale_by - - cache_key = (hash(np.array(image.getdata()).tobytes()), upscaler.name, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) - cached_image = upscale_cache.pop(cache_key, None) - - if cached_image is not None: - image = cached_image - else: - image = upscaler.scaler.upscale(image, upscale_by, upscaler.data_path) - - upscale_cache[cache_key] = image - if len(upscale_cache) > shared.opts.upscaling_max_images_in_cache: - upscale_cache.pop(next(iter(upscale_cache), None), None) - - if upscale_mode == 1 and upscale_crop: - cropped = Image.new("RGB", (upscale_to_width, upscale_to_height)) - cropped.paste(image, box=(upscale_to_width // 2 - image.width // 2, upscale_to_height // 2 - image.height // 2)) - image = cropped - info["Postprocess crop to"] = f"{image.width}x{image.height}" - - return image - - def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): - if upscaler_1_name == "None": - upscaler_1_name = None - - upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_1_name]), None) - assert upscaler1 or (upscaler_1_name is None), f'could not find upscaler named 
{upscaler_1_name}' - - if not upscaler1: - return - - if upscaler_2_name == "None": - upscaler_2_name = None - - upscaler2 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_2_name and x.name != "None"]), None) - assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}' - - upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) - pp.info[f"Postprocess upscaler"] = upscaler1.name - - if upscaler2 and upscaler_2_visibility > 0: - second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) - upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility) - - pp.info[f"Postprocess upscaler 2"] = upscaler2.name - - pp.image = upscaled_image - - def image_changed(self): - upscale_cache.clear() - - -class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale): - name = "Simple Upscale" - order = 900 - - def ui(self): - with FormRow(): - upscaler_name = gr.Dropdown(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) - upscale_by = gr.Slider(minimum=0.05, maximum=8.0, step=0.05, label="Upscale by", value=2) - - return { - "upscale_by": upscale_by, - "upscaler_name": upscaler_name, - } - - def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None): - if upscaler_name is None or upscaler_name == "None": - return - - upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None) - assert upscaler1, f'could not find upscaler named {upscaler_name}' - - pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False) - pp.info[f"Postprocess upscaler"] = upscaler1.name diff --git a/spaces/bigscience/promptsource/promptsource/utils.py b/spaces/bigscience/promptsource/promptsource/utils.py deleted file mode 100644 index ce57a2ac159508bdfb39a61188bd1031e28e6f6f..0000000000000000000000000000000000000000 --- a/spaces/bigscience/promptsource/promptsource/utils.py +++ /dev/null @@ -1,149 +0,0 @@ -# coding=utf-8 -import os - -import datasets -import requests - -from promptsource import DEFAULT_PROMPTSOURCE_CACHE_HOME -from promptsource.templates import INCLUDED_USERS - - -def removeHyphen(example): - example_clean = {} - for key in example.keys(): - if "-" in key: - new_key = key.replace("-", "_") - example_clean[new_key] = example[key] - else: - example_clean[key] = example[key] - example = example_clean - return example - - -def renameDatasetColumn(dataset): - col_names = dataset.column_names - for cols in col_names: - if "-" in cols: - dataset = dataset.rename_column(cols, cols.replace("-", "_")) - return dataset - - -# -# Helper functions for datasets library -# - - -def get_dataset_builder(path, conf=None): - "Get a dataset builder from name and conf." - module_path = datasets.load.dataset_module_factory(path) - builder_cls = datasets.load.import_main_class(module_path.module_path, dataset=True) - if conf: - builder_instance = builder_cls(name=conf, cache_dir=None, hash=module_path.hash) - else: - builder_instance = builder_cls(cache_dir=None, hash=module_path.hash) - return builder_instance - - -def get_dataset(path, conf=None): - "Get a dataset from name and conf." 
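    # Build the dataset directly when it can be downloaded automatically; datasets that require
    # a manual download (or report no size) are routed through load_dataset(), which retries
    # with a cache directory for manually downloaded data.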
- builder_instance = get_dataset_builder(path, conf) - if builder_instance.manual_download_instructions is None and builder_instance.info.size_in_bytes is not None: - builder_instance.download_and_prepare() - return builder_instance.as_dataset() - else: - return load_dataset(path, conf) - - -def load_dataset(dataset_name, subset_name): - try: - return datasets.load_dataset(dataset_name, subset_name) - except datasets.builder.ManualDownloadError: - cache_root_dir = ( - os.environ["PROMPTSOURCE_MANUAL_DATASET_DIR"] - if "PROMPTSOURCE_MANUAL_DATASET_DIR" in os.environ - else DEFAULT_PROMPTSOURCE_CACHE_HOME - ) - data_dir = ( - f"{cache_root_dir}/{dataset_name}" - if subset_name is None - else f"{cache_root_dir}/{dataset_name}/{subset_name}" - ) - return datasets.load_dataset( - dataset_name, - subset_name, - data_dir=data_dir, - ) - - -def get_dataset_confs(path): - "Get the list of confs for a dataset." - module_path = datasets.load.dataset_module_factory(path).module_path - # Get dataset builder class from the processing script - builder_cls = datasets.load.import_main_class(module_path, dataset=True) - # Instantiate the dataset builder - confs = builder_cls.BUILDER_CONFIGS - if confs and len(confs) > 1: - return confs - return [] - - -def render_features(features): - """Recursively render the dataset schema (i.e. the fields).""" - if isinstance(features, dict): - return {k: render_features(v) for k, v in features.items()} - if isinstance(features, datasets.features.ClassLabel): - return features.names - - if isinstance(features, datasets.features.Value): - return features.dtype - - if isinstance(features, datasets.features.Sequence): - return {"[]": render_features(features.feature)} - return features - - -# -# Loads dataset information -# - - -def filter_english_datasets(): - """ - Filter English datasets based on language tags in metadata. - - Also includes the datasets of any users listed in INCLUDED_USERS - """ - english_datasets = [] - - response = requests.get("https://huggingface.co/api/datasets?full=true") - tags = response.json() - - for dataset in tags: - dataset_name = dataset["id"] - - is_community_dataset = "/" in dataset_name - if is_community_dataset: - user = dataset_name.split("/")[0] - if user in INCLUDED_USERS: - english_datasets.append(dataset_name) - continue - - if "cardData" not in dataset: - continue - metadata = dataset["cardData"] - - if "language" not in metadata: - continue - languages = metadata["language"] - - if "en" in languages or "en-US" in languages: - english_datasets.append(dataset_name) - - return sorted(english_datasets) - - -def list_datasets(): - """Get all the datasets to work with.""" - dataset_list = filter_english_datasets() - dataset_list.sort(key=lambda x: x.lower()) - return dataset_list diff --git a/spaces/bioriAsaeru/text-to-voice/Dragon City Hack Tool Dragon City Hack In Five Minutes Or Lesser REPACK.md b/spaces/bioriAsaeru/text-to-voice/Dragon City Hack Tool Dragon City Hack In Five Minutes Or Lesser REPACK.md deleted file mode 100644 index 7748f788587eded67e8e535826c49833a50653cc..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Dragon City Hack Tool Dragon City Hack In Five Minutes Or Lesser REPACK.md +++ /dev/null @@ -1,140 +0,0 @@ - -

      Dragon City Hack Tool: How to Hack Dragon City in Five Minutes or Lesser

      - -

      Dragon City is a fun and addictive game where you can breed, hatch and collect dragons of different elements and rarities. You can also build your own dragon city, complete quests, join alliances and battle other players. However, the game can also be challenging and time-consuming, especially if you want to get the best dragons and resources. That's why many players are looking for a way to hack Dragon City and get unlimited gems, gold, food and other in-game items.

      - -

      If you are one of them, then you are in luck. In this article, we will show you how to use a Dragon City hack tool that can help you hack Dragon City in five minutes or lesser. This hack tool is easy to use, safe and effective. You don't need to jailbreak or root your device, or download any software. All you need is a web browser and an internet connection. With this hack tool, you can get access to all the features of the game without spending a dime.

      -

      Dragon City Hack Tool Dragon City Hack In Five Minutes Or Lesser


      Download Zip ››› https://urloso.com/2uyQtL



      - -

      What is Dragon City Hack Tool?

      - -

      Dragon City hack tool is an online generator that can generate unlimited gems, gold, food and other in-game items for your Dragon City account. You can use it on any device, whether it's PC, Android or iOS. You don't need to download anything or install anything on your device. The hack tool works by connecting to the game server and injecting the resources into your account. It's fast, easy and secure.

      - -

      How to Use Dragon City Hack Tool?

      - -

      Using Dragon City hack tool is very simple. Just follow these steps:

      - -
        -
      1. Go to the Dragon City hack tool website.
      2. Enter your game ID or username.
      3. Select your platform (PC, Android or iOS).
      4. Choose the amount of gems, gold, food and other items you want to generate.
      5. Click on the "Hack" button.
      6. Wait for a few seconds while the hack tool does its magic.
      7. Verify that you are not a robot by completing a short survey or offer.
      8. Enjoy your free resources!
      - -

      Why Use Dragon City Hack Tool?

      - -

      There are many reasons why you should use Dragon City hack tool. Here are some of them:

      - -
        -
      • You can get unlimited gems, gold, food and other items for free.
      • You can save time and money by not having to buy or earn them in the game.
      • You can unlock all the dragons and buildings in the game.
      • You can breed rare and legendary dragons with ease.
      • You can level up faster and complete quests quicker.
      • You can dominate the battles and tournaments against other players.
      • You can have more fun and enjoyment playing the game.
      - -

      Is Dragon City Hack Tool Safe?

      - -

      Yes, Dragon City hack tool is safe to use. It has been tested by thousands of users and has never caused any problems. The hack tool uses advanced encryption technology to protect your account and data from being detected or banned by the game server. It also has a proxy system that hides your IP address and location from being traced. You don't have to worry about anything when using this hack tool.

      - -

      Conclusion

      - -

      Dragon City is a great game that can keep you entertained for hours. However, if you want to get the most out of it, you may need some help from a Dragon City hack tool. This hack tool can help you hack Dragon City in five minutes or lesser and get unlimited gems, gold, food and other items for free. You don't need to jailbreak or root your device, or download any software. All you need is a web browser and an internet connection. With this hack tool, you can enjoy the game without any limitations or restrictions.

      -

      - -

      If you are interested in using this hack tool, just go to the website below and follow the instructions. You will be amazed by how easy and fast it is to hack Dragon City with this tool. Don't miss this opportunity and try it now!

      - -

      Dragon City Hack Tool

      -

      What are the Features of Dragon City Hack Tool?

      - -

      Dragon City hack tool is not just a simple generator that can give you unlimited gems and gold. It also has many other features that can enhance your gaming experience and make you the best dragon master. Here are some of the features of Dragon City hack tool:

      - -
        -
      • Instant breeding: You can breed any two dragons instantly without waiting for hours or days.
      • Instant leveling: You can level up any dragon instantly without feeding them or spending gems.
      • Unlock all islands: You can unlock all the islands in the game without completing any requirements or spending gems.
      • Unlock all dragons: You can unlock all the dragons in the game, including the rare and legendary ones.
      • Anti-ban protection: You can use the hack tool without worrying about getting banned by the game server. The hack tool has a built-in anti-ban system that prevents detection.
      • No root or jailbreak required: You don't need to root or jailbreak your device to use the hack tool. It works on any device, whether it's PC, Android or iOS.
      - -

      How to Get Dragon City Hack Tool?

      - -

      If you want to get Dragon City hack tool and enjoy all its features, you don't need to look any further. You can get it right here on this website. All you need to do is follow these simple steps:

      - -
        -
      1. Click on the "Download" button below.
      2. You will be redirected to a verification page. Complete a short survey or offer to prove that you are human.
      3. After verification, you will get access to the download link.
      4. Download the hack tool and install it on your device.
      5. Run the hack tool and enter your game ID or username.
      6. Select your platform (PC, Android or iOS).
      7. Choose the amount of gems, gold, food and other items you want to generate.
      8. Click on the "Hack" button and wait for a few seconds.
      9. Enjoy your free resources!
      - -

      Don't miss this chance and get Dragon City hack tool now! You will be amazed by how easy and fast it is to hack Dragon City with this tool. You will be able to breed, hatch and collect all the dragons you want, build your own dragon city, complete quests, join alliances and battle other players. You will have more fun and enjoyment playing the game. So what are you waiting for? Download Dragon City hack tool now and become the best dragon master!

      - -

      Download Dragon City Hack Tool

      -

      How to Hack Dragon City in Five Minutes or Lesser?

      - -

      If you are wondering how to hack Dragon City in five minutes or lesser, then you have come to the right place. In this article, we will show you a simple and effective method that can help you get unlimited gems and gold in Dragon City without spending any money or wasting any time. This method is based on using a Dragon City hack tool that can generate resources for your account in a matter of minutes.

      - -

      A Dragon City hack tool is an online generator that can connect to the game server and inject resources into your account. You don't need to download anything or install anything on your device. All you need is a web browser and an internet connection. With this hack tool, you can get access to all the features of the game without any limitations or restrictions.

      - -

      What are the Steps to Hack Dragon City in Five Minutes or Lesser?

      - -

      To hack Dragon City in five minutes or lesser, you just need to follow these simple steps:

      - -
        -
      1. Go to the Dragon City hack tool website by clicking on the link below.
      2. Enter your game ID or username.
      3. Select your platform (PC, Android or iOS).
      4. Choose the amount of gems and gold you want to generate.
      5. Click on the "Generate" button.
      6. Wait for a few seconds while the hack tool does its work.
      7. Verify that you are not a robot by completing a short survey or offer.
      8. Check your game account and enjoy your free resources!
      - -

      That's it! You have successfully hacked Dragon City in five minutes or lesser. You can now breed, hatch and collect all the dragons you want, build your own dragon city, complete quests, join alliances and battle other players. You can also repeat the process as many times as you want to get more resources whenever you need them.

      - -

      Why Should You Hack Dragon City in Five Minutes or Lesser?

      - -

      There are many reasons why you should hack Dragon City in five minutes or lesser. Here are some of them:

      - -
        -
      • You can get unlimited gems and gold for free.
      • You can save time and money by not having to buy or earn them in the game.
      • You can unlock all the dragons and buildings in the game.
      • You can breed rare and legendary dragons with ease.
      • You can level up faster and complete quests quicker.
      • You can dominate the battles and tournaments against other players.
      • You can have more fun and enjoyment playing the game.
      - -

      Is It Safe to Hack Dragon City in Five Minutes or Lesser?

      - -

      Yes, it is safe to hack Dragon City in five minutes or lesser. The hack tool has been tested by thousands of users and has never caused any problems. It uses advanced encryption technology to protect your account and data from being detected or banned by the game server, and its proxy system hides your IP address and location from being traced. You don't have to worry about anything when using this hack tool.

      - -

      Conclusion

      - -

      Hacking Dragon City in five minutes or lesser is possible and easy with the help of a Dragon City hack tool. This hack tool can help you get unlimited gems and gold in Dragon City without spending any money or wasting any time. You don't need to download anything or install anything on your device. All you need is a web browser and an internet connection. With this hack tool, you can enjoy the game without any limitations or restrictions.

      - -

      If you are interested in hacking Dragon City in five minutes or lesser, don't hesitate to click on the link below. You will be amazed by how easy and fast it is to hack Dragon City with this tool. Don't miss this opportunity and try it now!

      - -

      Hack Dragon City in Five Minutes or Lesser

      -

      Dragon City is a fun and addictive game that can keep you entertained for hours. However, if you want to get the most out of it, you may need some help from a Dragon City hack tool. This hack tool can help you hack Dragon City in five minutes or lesser and get unlimited gems and gold for free. You don't need to jailbreak or root your device, or download any software. All you need is a web browser and an internet connection. With this hack tool, you can enjoy the game without any limitations or restrictions.

      - -

      If you are interested in using this hack tool, just go to the website below and follow the instructions. You will be amazed by how easy and fast it is to hack Dragon City with this tool. Don't miss this opportunity and try it now!

      - -

      Hack Dragon City in Five Minutes or Lesser

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/For King And Country Run Wild Mp3 Download dbpower corazon cant The Lyrics and Chords of the Song.md b/spaces/bioriAsaeru/text-to-voice/For King And Country Run Wild Mp3 Download dbpower corazon cant The Lyrics and Chords of the Song.md deleted file mode 100644 index c1f4d99e761a6252b4a12f227442b9710b30660e..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/For King And Country Run Wild Mp3 Download dbpower corazon cant The Lyrics and Chords of the Song.md +++ /dev/null @@ -1,6 +0,0 @@ -

      For King And Country Run Wild Mp3 Download dbpower corazon cant


      Downloadhttps://urloso.com/2uyPZi



      -
      - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/bioriAsaeru/text-to-voice/Get Mastery Robert Greene Pdf Download The Ultimate Book on Power Skill and Creativity.md b/spaces/bioriAsaeru/text-to-voice/Get Mastery Robert Greene Pdf Download The Ultimate Book on Power Skill and Creativity.md deleted file mode 100644 index b799d1ad9ada25ab51e076b427c10bcadd2bd0e2..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Get Mastery Robert Greene Pdf Download The Ultimate Book on Power Skill and Creativity.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Mastery Robert Greene Pdf Download


      Download File > https://urloso.com/2uySa2



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/bioriAsaeru/text-to-voice/Halo New Blood Epub Download.md b/spaces/bioriAsaeru/text-to-voice/Halo New Blood Epub Download.md deleted file mode 100644 index f7c96856387e2ab0f6441c712ea43fd479a583ff..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Halo New Blood Epub Download.md +++ /dev/null @@ -1,20 +0,0 @@ -

      Halo New Blood Epub Download


      Downloadhttps://urloso.com/2uyS8P



      -
      -16.- Written in Red: A Cyberpunk Anthology (2013) by Peter David 17.- Final Flight (2013) by Tom Jolly - -10.- These number by Shawn and Corina together make it possible for Corina to play Noughts and Crosses without any computer. She usually gets her results about four games out of ten - -11.- A day to remember (an anniversary and a final day) by Shannon Messenger. It's the day they had fallen in love (twenty-seven years ago). They are now married, have two children and live in a small town in Michigan. - -12.- Last Leaf by Donna Gaines. A sequel to her bestseller, Blossom, and the complete story. - -13.- Dolcis by Heather Duffelink and Mark Sefton. It's the tale of an adopted boy from Vancouver who is shipped off to Nova Scotia in his youth and who is adopted by a family in the town of Dolcis. They will change their name. The story is going to be published in May 2014. - -14.- My Perfect Husband by William C. Dietz. In this story the main character, William has just broken up with his girlfriend. He is sitting in his trailer home, in Seattle, when suddenly a gorgeous blonde woman walks up and knocks on his door. This is the start of the story. - -15.- Shadow and Bone by Leigh Bardugo. This book has lots of characters but the main one is the heroine, Alina Starkov. She's a girl with no money, no love and no family. But she has a talent and that is: she can kill with her feet. She must try to survive in a country where half of the people want to kill her and the other half wish they were her. It's the story of Alina's rise to power and her quest to find out why she is so special and why she must kill people for a living. - -16.- It's not often that I like science fiction. It's normally that kind of books that I find really dull. But somehow, I just found this story great. The story, "Don't put it down!" is about a couple of scientists working together on the finest robot on the planet. The robot is called Nomula and is built as a weapon. It was designed as a robot to work for others and to become an assassin, but the people at the top of the organisation don't want that. But the robot is programmed 4fefd39f24
      -
      -
      -

      diff --git a/spaces/bradarrML/Diffusion_Space/README.md b/spaces/bradarrML/Diffusion_Space/README.md deleted file mode 100644 index 9dc74f62b98e9081f685e4f3f0535b37f8bf9daa..0000000000000000000000000000000000000000 --- a/spaces/bradarrML/Diffusion_Space/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Diffusion Space -emoji: 💽 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: true -license: creativeml-openrail-m -duplicated_from: nitrosocke/Diffusion_Space ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/breadlicker45/Muse-gen/README.md b/spaces/breadlicker45/Muse-gen/README.md deleted file mode 100644 index 4e2e20bd9ae161c409002a091bbc020066cc5a8d..0000000000000000000000000000000000000000 --- a/spaces/breadlicker45/Muse-gen/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Muse text gen -emoji: ⚡ -colorFrom: blue -colorTo: white -sdk: streamlit -sdk_version: 1.9.0 -app_file: app.py -pinned: true -duplicated_from: Pippoz/Hugging_Space ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference \ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/postprocessing.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/postprocessing.py deleted file mode 100644 index 84512606a43d6991df0ae1f046164eb3c70d751a..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/postprocessing.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import torch -from torch.nn import functional as F - -from detectron2.structures import Instances, ROIMasks - - -# perhaps should rename to "resize_instance" -def detector_postprocess( - results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5 -): - """ - Resize the output instances. - The input images are often resized when entering an object detector. - As a result, we often need the outputs of the detector in a different - resolution from its inputs. - - This function will resize the raw outputs of an R-CNN detector - to produce outputs according to the desired output resolution. - - Args: - results (Instances): the raw outputs from the detector. - `results.image_size` contains the input image resolution the detector sees. - This object might be modified in-place. - output_height, output_width: the desired output resolution. - Returns: - Instances: the resized output from the model, based on the output resolution - """ - if isinstance(output_width, torch.Tensor): - # This shape might (but not necessarily) be tensors during tracing. - # Converts integer tensors to float temporaries to ensure true - # division is performed when computing scale_x and scale_y. - output_width_tmp = output_width.float() - output_height_tmp = output_height.float() - new_size = torch.stack([output_height, output_width]) - else: - new_size = (output_height, output_width) - output_width_tmp = output_width - output_height_tmp = output_height - - scale_x, scale_y = ( - output_width_tmp / results.image_size[1], - output_height_tmp / results.image_size[0], - ) - results = Instances(new_size, **results.get_fields()) - - if results.has("pred_boxes"): - output_boxes = results.pred_boxes - elif results.has("proposal_boxes"): - output_boxes = results.proposal_boxes - else: - output_boxes = None - assert output_boxes is not None, "Predictions must contain boxes!" 
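    # Scale the boxes from the detector's input resolution to the requested output resolution,
    # clip them to the new image bounds, and drop instances whose boxes become empty; masks are
    # then re-pasted at the output size and keypoint coordinates are rescaled by the same factors.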
- - output_boxes.scale(scale_x, scale_y) - output_boxes.clip(results.image_size) - - results = results[output_boxes.nonempty()] - - if results.has("pred_masks"): - if isinstance(results.pred_masks, ROIMasks): - roi_masks = results.pred_masks - else: - # pred_masks is a tensor of shape (N, 1, M, M) - roi_masks = ROIMasks(results.pred_masks[:, 0, :, :]) - results.pred_masks = roi_masks.to_bitmasks( - results.pred_boxes, output_height, output_width, mask_threshold - ).tensor # TODO return ROIMasks/BitMask object in the future - - if results.has("pred_keypoints"): - results.pred_keypoints[:, :, 0] *= scale_x - results.pred_keypoints[:, :, 1] *= scale_y - - return results - - -def sem_seg_postprocess(result, img_size, output_height, output_width): - """ - Return semantic segmentation predictions in the original resolution. - - The input images are often resized when entering semantic segmentor. Moreover, in same - cases, they also padded inside segmentor to be divisible by maximum network stride. - As a result, we often need the predictions of the segmentor in a different - resolution from its inputs. - - Args: - result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W), - where C is the number of classes, and H, W are the height and width of the prediction. - img_size (tuple): image size that segmentor is taking as input. - output_height, output_width: the desired output resolution. - - Returns: - semantic segmentation prediction (Tensor): A tensor of the shape - (C, output_height, output_width) that contains per-pixel soft predictions. - """ - result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1) - result = F.interpolate( - result, size=(output_height, output_width), mode="bilinear", align_corners=False - )[0] - return result diff --git a/spaces/bugbugbug/vits-uma-genshin-honkai/Docker/Dockerfile b/spaces/bugbugbug/vits-uma-genshin-honkai/Docker/Dockerfile deleted file mode 100644 index 4d39cdf02a2ec151686cc1d61234bf723068fed8..0000000000000000000000000000000000000000 --- a/spaces/bugbugbug/vits-uma-genshin-honkai/Docker/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM python:3.9-bullseye -VOLUME ["/app"] -WORKDIR /app -# Set apt to Chinese mirror -RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list -RUN apt-get update && apt-get -y install cmake git -RUN git clone https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai -WORKDIR /app/vits-uma-genshin-honkai -RUN sed -i "s/\.launch()/\.launch(server_name=\"0.0.0.0\")/" /app/vits-uma-genshin-honkai/app.py -ADD vits.sh /app/vits.sh -EXPOSE 7860 -ENTRYPOINT [ "/app/vits.sh" ] \ No newline at end of file diff --git a/spaces/captainChan/CaptainChan/utils.py b/spaces/captainChan/CaptainChan/utils.py deleted file mode 100644 index 1b7b5db1bc1dd191191c31b3e72228ccd1c4f7a1..0000000000000000000000000000000000000000 --- a/spaces/captainChan/CaptainChan/utils.py +++ /dev/null @@ -1,304 +0,0 @@ -import logging -import os -import time - -import cv2 -import numpy as np -import torch -import yaml -from matplotlib import colors -from matplotlib import pyplot as plt -from torch import Tensor, nn -from torch.utils.data import ConcatDataset - -class CharsetMapper(object): - """A simple class to map ids into strings. - - It works only when the character set is 1:1 mapping between individual - characters and individual ids. - """ - - def __init__(self, - filename='', - max_length=30, - null_char=u'\u2591'): - """Creates a lookup table. 
- - Args: - filename: Path to charset file which maps characters to ids. - max_sequence_length: The max length of ids and string. - null_char: A unicode character used to replace '' character. - the default value is a light shade block '░'. - """ - self.null_char = null_char - self.max_length = max_length - - self.label_to_char = self._read_charset(filename) - self.char_to_label = dict(map(reversed, self.label_to_char.items())) - self.num_classes = len(self.label_to_char) - - def _read_charset(self, filename): - """Reads a charset definition from a tab separated text file. - - Args: - filename: a path to the charset file. - - Returns: - a dictionary with keys equal to character codes and values - unicode - characters. - """ - import re - pattern = re.compile(r'(\d+)\t(.+)') - charset = {} - self.null_label = 0 - charset[self.null_label] = self.null_char - with open(filename, 'r') as f: - for i, line in enumerate(f): - m = pattern.match(line) - assert m, f'Incorrect charset file. line #{i}: {line}' - label = int(m.group(1)) + 1 - char = m.group(2) - charset[label] = char - return charset - - def trim(self, text): - assert isinstance(text, str) - return text.replace(self.null_char, '') - - def get_text(self, labels, length=None, padding=True, trim=False): - """ Returns a string corresponding to a sequence of character ids. - """ - length = length if length else self.max_length - labels = [l.item() if isinstance(l, Tensor) else int(l) for l in labels] - if padding: - labels = labels + [self.null_label] * (length-len(labels)) - text = ''.join([self.label_to_char[label] for label in labels]) - if trim: text = self.trim(text) - return text - - def get_labels(self, text, length=None, padding=True, case_sensitive=False): - """ Returns the labels of the corresponding text. - """ - length = length if length else self.max_length - if padding: - text = text + self.null_char * (length - len(text)) - if not case_sensitive: - text = text.lower() - labels = [self.char_to_label[char] for char in text] - return labels - - def pad_labels(self, labels, length=None): - length = length if length else self.max_length - - return labels + [self.null_label] * (length - len(labels)) - - @property - def digits(self): - return '0123456789' - - @property - def digit_labels(self): - return self.get_labels(self.digits, padding=False) - - @property - def alphabets(self): - all_chars = list(self.char_to_label.keys()) - valid_chars = [] - for c in all_chars: - if c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ': - valid_chars.append(c) - return ''.join(valid_chars) - - @property - def alphabet_labels(self): - return self.get_labels(self.alphabets, padding=False) - - -class Timer(object): - """A simple timer.""" - def __init__(self): - self.data_time = 0. - self.data_diff = 0. - self.data_total_time = 0. - self.data_call = 0 - self.running_time = 0. - self.running_diff = 0. - self.running_total_time = 0. 
- self.running_call = 0 - - def tic(self): - self.start_time = time.time() - self.running_time = self.start_time - - def toc_data(self): - self.data_time = time.time() - self.data_diff = self.data_time - self.running_time - self.data_total_time += self.data_diff - self.data_call += 1 - - def toc_running(self): - self.running_time = time.time() - self.running_diff = self.running_time - self.data_time - self.running_total_time += self.running_diff - self.running_call += 1 - - def total_time(self): - return self.data_total_time + self.running_total_time - - def average_time(self): - return self.average_data_time() + self.average_running_time() - - def average_data_time(self): - return self.data_total_time / (self.data_call or 1) - - def average_running_time(self): - return self.running_total_time / (self.running_call or 1) - - -class Logger(object): - _handle = None - _root = None - - @staticmethod - def init(output_dir, name, phase): - format = '[%(asctime)s %(filename)s:%(lineno)d %(levelname)s {}] ' \ - '%(message)s'.format(name) - logging.basicConfig(level=logging.INFO, format=format) - - try: os.makedirs(output_dir) - except: pass - config_path = os.path.join(output_dir, f'{phase}.txt') - Logger._handle = logging.FileHandler(config_path) - Logger._root = logging.getLogger() - - @staticmethod - def enable_file(): - if Logger._handle is None or Logger._root is None: - raise Exception('Invoke Logger.init() first!') - Logger._root.addHandler(Logger._handle) - - @staticmethod - def disable_file(): - if Logger._handle is None or Logger._root is None: - raise Exception('Invoke Logger.init() first!') - Logger._root.removeHandler(Logger._handle) - - -class Config(object): - - def __init__(self, config_path, host=True): - def __dict2attr(d, prefix=''): - for k, v in d.items(): - if isinstance(v, dict): - __dict2attr(v, f'{prefix}{k}_') - else: - if k == 'phase': - assert v in ['train', 'test'] - if k == 'stage': - assert v in ['pretrain-vision', 'pretrain-language', - 'train-semi-super', 'train-super'] - self.__setattr__(f'{prefix}{k}', v) - - assert os.path.exists(config_path), '%s does not exists!' 
% config_path - with open(config_path) as file: - config_dict = yaml.load(file, Loader=yaml.FullLoader) - with open('configs/template.yaml') as file: - default_config_dict = yaml.load(file, Loader=yaml.FullLoader) - __dict2attr(default_config_dict) - __dict2attr(config_dict) - self.global_workdir = os.path.join(self.global_workdir, self.global_name) - - def __getattr__(self, item): - attr = self.__dict__.get(item) - if attr is None: - attr = dict() - prefix = f'{item}_' - for k, v in self.__dict__.items(): - if k.startswith(prefix): - n = k.replace(prefix, '') - attr[n] = v - return attr if len(attr) > 0 else None - else: - return attr - - def __repr__(self): - str = 'ModelConfig(\n' - for i, (k, v) in enumerate(sorted(vars(self).items())): - str += f'\t({i}): {k} = {v}\n' - str += ')' - return str - -def blend_mask(image, mask, alpha=0.5, cmap='jet', color='b', color_alpha=1.0): - # normalize mask - mask = (mask-mask.min()) / (mask.max() - mask.min() + np.finfo(float).eps) - if mask.shape != image.shape: - mask = cv2.resize(mask,(image.shape[1], image.shape[0])) - # get color map - color_map = plt.get_cmap(cmap) - mask = color_map(mask)[:,:,:3] - # convert float to uint8 - mask = (mask * 255).astype(dtype=np.uint8) - - # set the basic color - basic_color = np.array(colors.to_rgb(color)) * 255 - basic_color = np.tile(basic_color, [image.shape[0], image.shape[1], 1]) - basic_color = basic_color.astype(dtype=np.uint8) - # blend with basic color - blended_img = cv2.addWeighted(image, color_alpha, basic_color, 1-color_alpha, 0) - # blend with mask - blended_img = cv2.addWeighted(blended_img, alpha, mask, 1-alpha, 0) - - return blended_img - -def onehot(label, depth, device=None): - """ - Args: - label: shape (n1, n2, ..., ) - depth: a scalar - - Returns: - onehot: (n1, n2, ..., depth) - """ - if not isinstance(label, torch.Tensor): - label = torch.tensor(label, device=device) - onehot = torch.zeros(label.size() + torch.Size([depth]), device=device) - onehot = onehot.scatter_(-1, label.unsqueeze(-1), 1) - - return onehot - -class MyDataParallel(nn.DataParallel): - - def gather(self, outputs, target_device): - r""" - Gathers tensors from different GPUs on a specified device - (-1 means the CPU). - """ - def gather_map(outputs): - out = outputs[0] - if isinstance(out, (str, int, float)): - return out - if isinstance(out, list) and isinstance(out[0], str): - return [o for out in outputs for o in out] - if isinstance(out, torch.Tensor): - return torch.nn.parallel._functions.Gather.apply(target_device, self.dim, *outputs) - if out is None: - return None - if isinstance(out, dict): - if not all((len(out) == len(d) for d in outputs)): - raise ValueError('All dicts must have the same number of keys') - return type(out)(((k, gather_map([d[k] for d in outputs])) - for k in out)) - return type(out)(map(gather_map, zip(*outputs))) - - # Recursive function calls like this create reference cycles. - # Setting the function to None clears the refcycle. 
- try: - res = gather_map(outputs) - finally: - gather_map = None - return res - - -class MyConcatDataset(ConcatDataset): - def __getattr__(self, k): - return getattr(self.datasets[0], k) diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/tracking/__init__.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/tracking/__init__.py deleted file mode 100644 index 21078ae822b04b71dbd8b056b5993d173eaf6bff..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/tracking/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .base_tracker import ( # noqa - BaseTracker, - build_tracker_head, - TRACKER_HEADS_REGISTRY, -) -from .bbox_iou_tracker import BBoxIOUTracker # noqa -from .hungarian_tracker import BaseHungarianTracker # noqa -from .iou_weighted_hungarian_bbox_iou_tracker import ( # noqa - IOUWeightedHungarianBBoxIOUTracker, -) -from .utils import create_prediction_pairs # noqa -from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker # noqa - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/ccolas/TastyPiano/src/cocktails/representation_learning/run.py b/spaces/ccolas/TastyPiano/src/cocktails/representation_learning/run.py deleted file mode 100644 index a1278ac80039d25130b4c05bc5670bcfe197d13a..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/cocktails/representation_learning/run.py +++ /dev/null @@ -1,557 +0,0 @@ -import torch; torch.manual_seed(0) -import torch.utils -from torch.utils.data import DataLoader -import torch.distributions -import torch.nn as nn -import matplotlib.pyplot as plt; plt.rcParams['figure.dpi'] = 200 -from src.cocktails.representation_learning.dataset import MyDataset, get_representation_from_ingredient, get_max_n_ingredients -import json -import pandas as pd -import numpy as np -import os -from src.cocktails.representation_learning.vae_model import get_vae_model -from src.cocktails.config import COCKTAILS_CSV_DATA, FULL_COCKTAIL_REP_PATH, EXPERIMENT_PATH -from src.cocktails.utilities.cocktail_utilities import get_bunch_of_rep_keys -from src.cocktails.utilities.ingredients_utilities import ingredient_profiles -from resource import getrusage -from resource import RUSAGE_SELF -import gc -gc.collect(2) -device = 'cuda' if torch.cuda.is_available() else 'cpu' - -def get_params(): - data = pd.read_csv(COCKTAILS_CSV_DATA) - max_ingredients, ingredient_set, liquor_set, liqueur_set = get_max_n_ingredients(data) - num_ingredients = len(ingredient_set) - rep_keys = get_bunch_of_rep_keys()['custom'] - ing_keys = [k.split(' ')[1] for k in rep_keys] - ing_keys.remove('volume') - nb_ing_categories = len(set(ingredient_profiles['type'])) - category_encodings = dict(zip(sorted(set(ingredient_profiles['type'])), np.eye(nb_ing_categories))) - - params = dict(trial_id='test', - save_path=EXPERIMENT_PATH + "/deepset_vae/", - nb_epochs=2000, - print_every=50, - plot_every=100, - batch_size=64, - lr=0.001, - dropout=0., - nb_epoch_switch_beta=600, - latent_dim=10, - beta_vae=0.2, - ing_keys=ing_keys, - nb_ingredients=len(ingredient_set), - hidden_dims_ingredients=[128], - hidden_dims_cocktail=[32], - hidden_dims_decoder=[32], - agg='mean', - activation='relu', - auxiliaries_dict=dict(categories=dict(weight=0, type='classif', final_activ=None, dim_output=len(set(data['subcategory']))), - glasses=dict(weight=0, type='classif', final_activ=None, 
dim_output=len(set(data['glass']))), - prep_type=dict(weight=0, type='classif', final_activ=None, dim_output=len(set(data['category']))), - cocktail_reps=dict(weight=0, type='regression', final_activ=None, dim_output=13), - volume=dict(weight=0, type='regression', final_activ='relu', dim_output=1), - taste_reps=dict(weight=0, type='regression', final_activ='relu', dim_output=2), - ingredients_presence=dict(weight=0, type='multiclassif', final_activ=None, dim_output=num_ingredients)), - category_encodings=category_encodings - ) - # params = dict(trial_id='test', - # save_path=EXPERIMENT_PATH + "/deepset_vae/", - # nb_epochs=1000, - # print_every=50, - # plot_every=100, - # batch_size=64, - # lr=0.001, - # dropout=0., - # nb_epoch_switch_beta=500, - # latent_dim=64, - # beta_vae=0.3, - # ing_keys=ing_keys, - # nb_ingredients=len(ingredient_set), - # hidden_dims_ingredients=[128], - # hidden_dims_cocktail=[128, 128], - # hidden_dims_decoder=[128, 128], - # agg='mean', - # activation='mish', - # auxiliaries_dict=dict(categories=dict(weight=0.5, type='classif', final_activ=None, dim_output=len(set(data['subcategory']))), - # glasses=dict(weight=0.03, type='classif', final_activ=None, dim_output=len(set(data['glass']))), - # prep_type=dict(weight=0.02, type='classif', final_activ=None, dim_output=len(set(data['category']))), - # cocktail_reps=dict(weight=1, type='regression', final_activ=None, dim_output=13), - # volume=dict(weight=1, type='regression', final_activ='relu', dim_output=1), - # taste_reps=dict(weight=1, type='regression', final_activ='relu', dim_output=2), - # ingredients_presence=dict(weight=1.5, type='multiclassif', final_activ=None, dim_output=num_ingredients)), - # category_encodings=category_encodings - # ) - water_rep, indexes_to_normalize = get_representation_from_ingredient(ingredients=['water'], quantities=[1], - max_q_per_ing=dict(zip(ingredient_set, [1] * num_ingredients)), index=0, - params=params) - dim_rep_ingredient = water_rep.size - params['indexes_ing_to_normalize'] = indexes_to_normalize - params['deepset_latent_dim'] = dim_rep_ingredient * max_ingredients - params['input_dim'] = dim_rep_ingredient - params['dim_rep_ingredient'] = dim_rep_ingredient - params = compute_expe_name_and_save_path(params) - del params['category_encodings'] # to dump - with open(params['save_path'] + 'params.json', 'w') as f: - json.dump(params, f) - - params = complete_params(params) - return params - -def complete_params(params): - data = pd.read_csv(COCKTAILS_CSV_DATA) - cocktail_reps = np.loadtxt(FULL_COCKTAIL_REP_PATH) - nb_ing_categories = len(set(ingredient_profiles['type'])) - category_encodings = dict(zip(sorted(set(ingredient_profiles['type'])), np.eye(nb_ing_categories))) - params['cocktail_reps'] = cocktail_reps - params['raw_data'] = data - params['category_encodings'] = category_encodings - return params - -def compute_losses_and_accuracies(loss_functions, auxiliaries, auxiliaries_str, outputs, data): - losses = dict() - accuracies = dict() - other_metrics = dict() - for i_k, k in enumerate(auxiliaries_str): - # get ground truth - # compute loss - if k == 'volume': - outputs[i_k] = outputs[i_k].flatten() - ground_truth = auxiliaries[k] - if ground_truth.dtype == torch.float64: - losses[k] = loss_functions[k](outputs[i_k], ground_truth.float()).float() - elif ground_truth.dtype == torch.int64: - if str(loss_functions[k]) != "BCEWithLogitsLoss()": - losses[k] = loss_functions[k](outputs[i_k].float(), ground_truth.long()).float() - else: - losses[k] = 
loss_functions[k](outputs[i_k].float(), ground_truth.float()).float() - else: - losses[k] = loss_functions[k](outputs[i_k], ground_truth).float() - # compute accuracies - if str(loss_functions[k]) == 'CrossEntropyLoss()': - bs, n_options = outputs[i_k].shape - predicted = outputs[i_k].argmax(dim=1).detach().numpy() - true = ground_truth.int().detach().numpy() - confusion_matrix = np.zeros([n_options, n_options]) - for i in range(bs): - confusion_matrix[true[i], predicted[i]] += 1 - acc = confusion_matrix.diagonal().sum() / bs - for i in range(n_options): - if confusion_matrix[i].sum() != 0: - confusion_matrix[i] /= confusion_matrix[i].sum() - other_metrics[k + '_confusion'] = confusion_matrix - accuracies[k] = np.mean(outputs[i_k].argmax(dim=1).detach().numpy() == ground_truth.int().detach().numpy()) - assert (acc - accuracies[k]) < 1e-5 - - elif str(loss_functions[k]) == 'BCEWithLogitsLoss()': - assert k == 'ingredients_presence' - outputs_rescaled = outputs[i_k].detach().numpy() * data.dataset.std_ing_quantities + data.dataset.mean_ing_quantities - predicted_presence = (outputs_rescaled > 0).astype(bool) - presence = ground_truth.detach().numpy().astype(bool) - other_metrics[k + '_false_positive'] = np.mean(np.logical_and(predicted_presence.astype(bool), ~presence.astype(bool))) - other_metrics[k + '_false_negative'] = np.mean(np.logical_and(~predicted_presence.astype(bool), presence.astype(bool))) - accuracies[k] = np.mean(predicted_presence == presence) # accuracy for multi class labeling - elif str(loss_functions[k]) == 'MSELoss()': - accuracies[k] = np.nan - else: - raise ValueError - return losses, accuracies, other_metrics - -def compute_metric_output(aux_other_metrics, data, ingredient_quantities, x_hat): - ing_q = ingredient_quantities.detach().numpy() * data.dataset.std_ing_quantities + data.dataset.mean_ing_quantities - ing_presence = (ing_q > 0) - x_hat = x_hat.detach().numpy() * data.dataset.std_ing_quantities + data.dataset.mean_ing_quantities - # abs_diff = np.abs(ing_q - x_hat) * data.dataset.max_ing_quantities - abs_diff = np.abs(ing_q - x_hat) - ing_q_abs_loss_when_present, ing_q_abs_loss_when_absent = [], [] - for i in range(ingredient_quantities.shape[0]): - ing_q_abs_loss_when_present.append(np.mean(abs_diff[i, np.where(ing_presence[i])])) - ing_q_abs_loss_when_absent.append(np.mean(abs_diff[i, np.where(~ing_presence[i])])) - aux_other_metrics['ing_q_abs_loss_when_present'] = np.mean(ing_q_abs_loss_when_present) - aux_other_metrics['ing_q_abs_loss_when_absent'] = np.mean(ing_q_abs_loss_when_absent) - return aux_other_metrics - -def run_epoch(opt, train, model, data, loss_functions, weights, params): - if train: - model.train() - else: - model.eval() - - # prepare logging of losses - losses = dict(kld_loss=[], - mse_loss=[], - vae_loss=[], - volume_loss=[], - global_loss=[]) - accuracies = dict() - other_metrics = dict() - for aux in params['auxiliaries_dict'].keys(): - losses[aux] = [] - accuracies[aux] = [] - if train: opt.zero_grad() - - for d in data: - nb_ingredients = d[0] - batch_size = nb_ingredients.shape[0] - x_ingredients = d[1].float() - ingredient_quantities = d[2] - cocktail_reps = d[3] - auxiliaries = d[4] - for k in auxiliaries.keys(): - if auxiliaries[k].dtype == torch.float64: auxiliaries[k] = auxiliaries[k].float() - taste_valid = d[-1] - x = x_ingredients.to(device) - x_hat, z, mean, log_var, outputs, auxiliaries_str = model.forward_direct(ingredient_quantities.float()) - # get auxiliary losses and accuracies - aux_losses, aux_accuracies, 
aux_other_metrics = compute_losses_and_accuracies(loss_functions, auxiliaries, auxiliaries_str, outputs, data) - - # compute vae loss - mse_loss = ((ingredient_quantities - x_hat) ** 2).mean().float() - kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mean ** 2 - log_var.exp(), dim=1)).float() - vae_loss = mse_loss + params['beta_vae'] * (params['latent_dim'] / params['nb_ingredients']) * kld_loss - # compute total volume loss to train decoder - # volume_loss = ((ingredient_quantities.sum(dim=1) - x_hat.sum(dim=1)) ** 2).mean().float() - volume_loss = torch.FloatTensor([0]) - - aux_other_metrics = compute_metric_output(aux_other_metrics, data, ingredient_quantities, x_hat) - - indexes_taste_valid = np.argwhere(taste_valid.detach().numpy()).flatten() - if indexes_taste_valid.size > 0: - outputs_taste = model.get_auxiliary(z[indexes_taste_valid], aux_str='taste_reps') - gt = auxiliaries['taste_reps'][indexes_taste_valid] - factor_loss = indexes_taste_valid.size / (0.3 * batch_size)# factor on the loss: if same ratio as actual dataset factor = 1 if there is less data, then the factor decreases, more data, it increases - aux_losses['taste_reps'] = (loss_functions['taste_reps'](outputs_taste, gt) * factor_loss).float() - else: - aux_losses['taste_reps'] = torch.FloatTensor([0]).reshape([]) - aux_accuracies['taste_reps'] = 0 - - # aggregate losses - global_loss = torch.sum(torch.cat([torch.atleast_1d(vae_loss), torch.atleast_1d(volume_loss)] + [torch.atleast_1d(aux_losses[k] * weights[k]) for k in params['auxiliaries_dict'].keys()])) - # for k in params['auxiliaries_dict'].keys(): - # global_loss += aux_losses[k] * weights[k] - - if train: - global_loss.backward() - opt.step() - opt.zero_grad() - - # logging - losses['global_loss'].append(float(global_loss)) - losses['mse_loss'].append(float(mse_loss)) - losses['vae_loss'].append(float(vae_loss)) - losses['volume_loss'].append(float(volume_loss)) - losses['kld_loss'].append(float(kld_loss)) - for k in params['auxiliaries_dict'].keys(): - losses[k].append(float(aux_losses[k])) - accuracies[k].append(float(aux_accuracies[k])) - for k in aux_other_metrics.keys(): - if k not in other_metrics.keys(): - other_metrics[k] = [aux_other_metrics[k]] - else: - other_metrics[k].append(aux_other_metrics[k]) - - for k in losses.keys(): - losses[k] = np.mean(losses[k]) - for k in accuracies.keys(): - accuracies[k] = np.mean(accuracies[k]) - for k in other_metrics.keys(): - other_metrics[k] = np.mean(other_metrics[k], axis=0) - return model, losses, accuracies, other_metrics - -def prepare_data_and_loss(params): - train_data = MyDataset(split='train', params=params) - test_data = MyDataset(split='test', params=params) - - train_data_loader = DataLoader(train_data, batch_size=params['batch_size'], shuffle=True) - test_data_loader = DataLoader(test_data, batch_size=params['batch_size'], shuffle=True) - - loss_functions = dict() - weights = dict() - for k in sorted(params['auxiliaries_dict'].keys()): - if params['auxiliaries_dict'][k]['type'] == 'classif': - if k == 'glasses': - classif_weights = train_data.glasses_weights - elif k == 'prep_type': - classif_weights = train_data.prep_types_weights - elif k == 'categories': - classif_weights = train_data.categories_weights - else: - raise ValueError - loss_functions[k] = nn.CrossEntropyLoss(torch.FloatTensor(classif_weights)) - elif params['auxiliaries_dict'][k]['type'] == 'multiclassif': - loss_functions[k] = nn.BCEWithLogitsLoss() - elif params['auxiliaries_dict'][k]['type'] == 'regression': - loss_functions[k] 
= nn.MSELoss() - else: - raise ValueError - weights[k] = params['auxiliaries_dict'][k]['weight'] - - - return loss_functions, train_data_loader, test_data_loader, weights - -def print_losses(train, losses, accuracies, other_metrics): - keyword = 'Train' if train else 'Eval' - print(f'\t{keyword} logs:') - keys = ['global_loss', 'vae_loss', 'mse_loss', 'kld_loss', 'volume_loss'] - for k in keys: - print(f'\t\t{k} - Loss: {losses[k]:.2f}') - for k in sorted(accuracies.keys()): - print(f'\t\t{k} (aux) - Loss: {losses[k]:.2f}, Acc: {accuracies[k]:.2f}') - for k in sorted(other_metrics.keys()): - if 'confusion' not in k: - print(f'\t\t{k} - {other_metrics[k]:.2f}') - - -def run_experiment(params, verbose=True): - loss_functions, train_data_loader, test_data_loader, weights = prepare_data_and_loss(params) - params['filter_decoder_output'] = train_data_loader.dataset.filter_decoder_output - - model_params = [params[k] for k in ["input_dim", "deepset_latent_dim", "hidden_dims_ingredients", "activation", - "hidden_dims_cocktail", "hidden_dims_decoder", "nb_ingredients", "latent_dim", "agg", "dropout", "auxiliaries_dict", - "filter_decoder_output"]] - model = get_vae_model(*model_params) - opt = torch.optim.AdamW(model.parameters(), lr=params['lr']) - - - all_train_losses = [] - all_eval_losses = [] - all_train_accuracies = [] - all_eval_accuracies = [] - all_eval_other_metrics = [] - all_train_other_metrics = [] - best_loss = np.inf - model, eval_losses, eval_accuracies, eval_other_metrics = run_epoch(opt=opt, train=False, model=model, data=test_data_loader, loss_functions=loss_functions, - weights=weights, params=params) - all_eval_losses.append(eval_losses) - all_eval_accuracies.append(eval_accuracies) - all_eval_other_metrics.append(eval_other_metrics) - if verbose: print(f'\n--------\nEpoch #0') - if verbose: print_losses(train=False, accuracies=eval_accuracies, losses=eval_losses, other_metrics=eval_other_metrics) - for epoch in range(params['nb_epochs']): - if verbose and (epoch + 1) % params['print_every'] == 0: print(f'\n--------\nEpoch #{epoch+1}') - model, train_losses, train_accuracies, train_other_metrics = run_epoch(opt=opt, train=True, model=model, data=train_data_loader, loss_functions=loss_functions, - weights=weights, params=params) - if verbose and (epoch + 1) % params['print_every'] == 0: print_losses(train=True, accuracies=train_accuracies, losses=train_losses, other_metrics=train_other_metrics) - model, eval_losses, eval_accuracies, eval_other_metrics = run_epoch(opt=opt, train=False, model=model, data=test_data_loader, loss_functions=loss_functions, - weights=weights, params=params) - if verbose and (epoch + 1) % params['print_every'] == 0: print_losses(train=False, accuracies=eval_accuracies, losses=eval_losses, other_metrics=eval_other_metrics) - if eval_losses['global_loss'] < best_loss: - best_loss = eval_losses['global_loss'] - if verbose: print(f'Saving new best model with loss {best_loss:.2f}') - torch.save(model.state_dict(), params['save_path'] + f'checkpoint_best.save') - - # log - all_train_losses.append(train_losses) - all_train_accuracies.append(train_accuracies) - all_eval_losses.append(eval_losses) - all_eval_accuracies.append(eval_accuracies) - all_eval_other_metrics.append(eval_other_metrics) - all_train_other_metrics.append(train_other_metrics) - - # if epoch == params['nb_epoch_switch_beta']: - # params['beta_vae'] = 2.5 - # params['auxiliaries_dict']['prep_type']['weight'] /= 10 - # params['auxiliaries_dict']['glasses']['weight'] /= 10 - - if (epoch + 1) % 
params['plot_every'] == 0: - - plot_results(all_train_losses, all_train_accuracies, all_train_other_metrics, - all_eval_losses, all_eval_accuracies, all_eval_other_metrics, params['plot_path'], weights) - - return model - -def plot_results(all_train_losses, all_train_accuracies, all_train_other_metrics, - all_eval_losses, all_eval_accuracies, all_eval_other_metrics, plot_path, weights): - - steps = np.arange(len(all_eval_accuracies)) - - loss_keys = sorted(all_train_losses[0].keys()) - acc_keys = sorted(all_train_accuracies[0].keys()) - metrics_keys = sorted(all_train_other_metrics[0].keys()) - - plt.figure() - plt.title('Train losses') - for k in loss_keys: - factor = 1 if k == 'mse_loss' else 1 - if k not in weights.keys(): - plt.plot(steps[1:], [train_loss[k] * factor for train_loss in all_train_losses], label=k) - else: - if weights[k] != 0: - plt.plot(steps[1:], [train_loss[k] * factor for train_loss in all_train_losses], label=k) - - plt.legend() - plt.ylim([0, 4]) - plt.savefig(plot_path + 'train_losses.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - plt.figure() - plt.title('Train accuracies') - for k in acc_keys: - if weights[k] != 0: - plt.plot(steps[1:], [train_acc[k] for train_acc in all_train_accuracies], label=k) - plt.legend() - plt.ylim([0, 1]) - plt.savefig(plot_path + 'train_acc.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - plt.figure() - plt.title('Train other metrics') - for k in metrics_keys: - if 'confusion' not in k and 'presence' in k: - plt.plot(steps[1:], [train_metric[k] for train_metric in all_train_other_metrics], label=k) - plt.legend() - plt.ylim([0, 1]) - plt.savefig(plot_path + 'train_ing_presence_errors.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - plt.figure() - plt.title('Train other metrics') - for k in metrics_keys: - if 'confusion' not in k and 'presence' not in k: - plt.plot(steps[1:], [train_metric[k] for train_metric in all_train_other_metrics], label=k) - plt.legend() - plt.savefig(plot_path + 'train_ing_q_error.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - plt.figure() - plt.title('Eval losses') - for k in loss_keys: - factor = 1 if k == 'mse_loss' else 1 - if k not in weights.keys(): - plt.plot(steps, [eval_loss[k] * factor for eval_loss in all_eval_losses], label=k) - else: - if weights[k] != 0: - plt.plot(steps, [eval_loss[k] * factor for eval_loss in all_eval_losses], label=k) - plt.legend() - plt.ylim([0, 4]) - plt.savefig(plot_path + 'eval_losses.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - plt.figure() - plt.title('Eval accuracies') - for k in acc_keys: - if weights[k] != 0: - plt.plot(steps, [eval_acc[k] for eval_acc in all_eval_accuracies], label=k) - plt.legend() - plt.ylim([0, 1]) - plt.savefig(plot_path + 'eval_acc.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - plt.figure() - plt.title('Eval other metrics') - for k in metrics_keys: - if 'confusion' not in k and 'presence' in k: - plt.plot(steps, [eval_metric[k] for eval_metric in all_eval_other_metrics], label=k) - plt.legend() - plt.ylim([0, 1]) - plt.savefig(plot_path + 'eval_ing_presence_errors.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - plt.figure() - plt.title('Eval other metrics') - for k in metrics_keys: - if 'confusion' not in k and 'presence' not in k: - plt.plot(steps, [eval_metric[k] for eval_metric in all_eval_other_metrics], label=k) - plt.legend() - plt.savefig(plot_path + 'eval_ing_q_error.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - - for k in metrics_keys: - if 'confusion' in k: - plt.figure() - 
plt.title(k) - plt.ylabel('True') - plt.xlabel('Predicted') - plt.imshow(all_eval_other_metrics[-1][k], vmin=0, vmax=1) - plt.colorbar() - plt.savefig(plot_path + f'eval_{k}.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - for k in metrics_keys: - if 'confusion' in k: - plt.figure() - plt.title(k) - plt.ylabel('True') - plt.xlabel('Predicted') - plt.imshow(all_train_other_metrics[-1][k], vmin=0, vmax=1) - plt.colorbar() - plt.savefig(plot_path + f'train_{k}.png', dpi=200) - fig = plt.gcf() - plt.close(fig) - - plt.close('all') - - -def get_model(model_path): - - with open(model_path + 'params.json', 'r') as f: - params = json.load(f) - params['save_path'] = model_path - max_ing_quantities = np.loadtxt(params['save_path'] + 'max_ing_quantities.txt') - mean_ing_quantities = np.loadtxt(params['save_path'] + 'mean_ing_quantities.txt') - std_ing_quantities = np.loadtxt(params['save_path'] + 'std_ing_quantities.txt') - min_when_present_ing_quantities = np.loadtxt(params['save_path'] + 'min_when_present_ing_quantities.txt') - def filter_decoder_output(output): - output = output.detach().numpy() - output_unnormalized = output * std_ing_quantities + mean_ing_quantities - if output.ndim == 1: - output_unnormalized[np.where(output_unnormalized < min_when_present_ing_quantities)] = 0 - else: - for i in range(output.shape[0]): - output_unnormalized[i, np.where(output_unnormalized[i] < min_when_present_ing_quantities)] = 0 - return output_unnormalized.copy() - params['filter_decoder_output'] = filter_decoder_output - model_chkpt = model_path + "checkpoint_best.save" - model_params = [params[k] for k in ["input_dim", "deepset_latent_dim", "hidden_dims_ingredients", "activation", - "hidden_dims_cocktail", "hidden_dims_decoder", "nb_ingredients", "latent_dim", "agg", "dropout", "auxiliaries_dict", - "filter_decoder_output"]] - model = get_vae_model(*model_params) - model.load_state_dict(torch.load(model_chkpt)) - model.eval() - return model, filter_decoder_output, params - - -def compute_expe_name_and_save_path(params): - weights_str = '[' - for aux in params['auxiliaries_dict'].keys(): - weights_str += f'{params["auxiliaries_dict"][aux]["weight"]}, ' - weights_str = weights_str[:-2] + ']' - save_path = params['save_path'] + params["trial_id"] - save_path += f'_lr{params["lr"]}' - save_path += f'_betavae{params["beta_vae"]}' - save_path += f'_bs{params["batch_size"]}' - save_path += f'_latentdim{params["latent_dim"]}' - save_path += f'_hding{params["hidden_dims_ingredients"]}' - save_path += f'_hdcocktail{params["hidden_dims_cocktail"]}' - save_path += f'_hddecoder{params["hidden_dims_decoder"]}' - save_path += f'_agg{params["agg"]}' - save_path += f'_activ{params["activation"]}' - save_path += f'_w{weights_str}' - counter = 0 - while os.path.exists(save_path + f"_{counter}"): - counter += 1 - save_path = save_path + f"_{counter}" + '/' - params["save_path"] = save_path - os.makedirs(save_path) - os.makedirs(save_path + 'plots/') - params['plot_path'] = save_path + 'plots/' - print(f'logging to {save_path}') - return params - - - -if __name__ == '__main__': - params = get_params() - run_experiment(params) - diff --git a/spaces/ccolas/TastyPiano/src/music/pipeline/music_pipeline.py b/spaces/ccolas/TastyPiano/src/music/pipeline/music_pipeline.py deleted file mode 100644 index 056dd3050eb827723b86d8b90b4a2e31fd701c7f..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/music/pipeline/music_pipeline.py +++ /dev/null @@ -1,88 +0,0 @@ -from src.music.pipeline.url2audio import 
url2audio -from src.music.pipeline.audio2midi import audio2midi -from src.music.pipeline.midi2processed import midi2processed -from src.music.pipeline.processed2encoded import processed2encoded -from src.music.pipeline.encoded2rep import encoded2rep -from src.music.config import RANDOM_CROP, NB_AUG, FROM_URL_PATH -# from src.music.pipeline.synth2audio import AudioRecorder -# from src.music.pipeline.processed2handcodedrep import processed2handcodedrep -import time -import hashlib - -VERBOSE = True -AUGMENTATION, NOISE_INJECTED = False, False -CROP = 10# crop 30s before transcription - -# AUDIO_RECORDER = AudioRecorder(place='home') - -def encode_music(url=None, - audio_path=None, - midi_path=None, - processed_path=None, - record=False, - crop=CROP, - random_crop=RANDOM_CROP, - augmentation=AUGMENTATION, - noise_injection=NOISE_INJECTED, - apply_filtering=True, - nb_aug=NB_AUG, - level=0, - verbose=VERBOSE): - if not record: assert url is not None or audio_path is not None or midi_path is not None or processed_path is not None - init_time = time.time() - error = '' - try: - if record: - assert audio_path is None and midi_path is None - if verbose: print(' ' * level + 'Processing music, recorded from mic.') - audio_path = AUDIO_RECORDER.record_one() - error = '' - if processed_path is None: - if midi_path is None: - if audio_path is None: - if verbose and not record: print(' ' * level + 'Processing music, from audio source.') - init_t = time.time() - audio_path, _, error = url2audio(playlist_path=FROM_URL_PATH, video_url=url, verbose=verbose, level=level+2) - if verbose: print(' ' * (level + 4) + f'Audio downloaded in {int(time.time() - init_t)} seconds.') - else: - if verbose and not record: print(' ' * level + 'Processing music, from midi source.') - init_t = time.time() - midi_path, error = audio2midi(audio_path, crop=crop, random_crop=random_crop, verbose=verbose, level=level+2) - if verbose: print(' ' * (level + 4) + f'Audio transcribed to midi in {int(time.time() - init_t)} seconds.') - init_t = time.time() - processed_path, error = midi2processed(midi_path, apply_filtering=apply_filtering, verbose=verbose, level=level+2) - if verbose: print(' ' * (level + 4) + f'Midi preprocessed in {int(time.time() - init_t)} seconds.') - init_t = time.time() - encoded_path, error = processed2encoded(processed_path, augmentation=augmentation, nb_aug=nb_aug, noise_injection=noise_injection, verbose=verbose, level=level+2) - if verbose: print(' ' * (level + 4) + f'Midi encoded in {int(time.time() - init_t)} seconds.') - init_t = time.time() - representation_path, representation, error = encoded2rep(encoded_path, return_rep=True, level=level+2, verbose=verbose) - if verbose: print(' ' * (level + 4) + f'Music representation computed in {int(time.time() - init_t)} seconds.') - init_t = time.time() - handcoded_rep_path, handcoded_rep, error = None, None, '' - # handcoded_rep_path, handcoded_rep, error = processed2handcodedrep(processed_path, return_rep=True, level=level+2, verbose=verbose) - if verbose: print(' ' * (level + 4) + f'Music handcoded representation computed in {int(time.time() - init_t)} seconds.') - # assert handcoded_rep_path is not None and representation_path is not None - all_paths = dict(url=url, audio_path=audio_path, midi_path=midi_path, processed_path=processed_path, encoded_path=encoded_path, - representation_path=representation_path, handcoded_rep_path=handcoded_rep_path) - if audio_path is not None: - print('audio hash: ', hashlib.md5(open(audio_path, 'rb').read()).hexdigest()) - if 
midi_path is not None: - print('midi hash: ', hashlib.md5(open(midi_path, 'rb').read()).hexdigest()) - print('processed hash: ', hashlib.md5(open(processed_path, 'rb').read()).hexdigest()) - print('encoded hash: ', hashlib.md5(open(encoded_path, 'rb').read()).hexdigest()) - print('rep hash: ', hashlib.md5(open(representation_path, 'rb').read()).hexdigest()) - print("rep:", representation[:10]) - if verbose: print(' ' * (level + 2) + f'Music processed in {int(time.time() - init_time)} seconds.') - except Exception as err: - print(err, error) - if verbose: print(' ' * (level + 2) + f'Music FAILED to process in {int(time.time() - init_time)} seconds.') - representation = None - handcoded_rep = None - all_paths = dict() - - return representation, handcoded_rep, all_paths, error - -if __name__ == '__main__': - representation = encode_music(url="https://www.youtube.com/watch?v=a2LFVWBmoiw")[0] - # representation = encode_music(record=True)[0] \ No newline at end of file diff --git a/spaces/ccolas/TastyPiano/src/music/utilities/__init__.py b/spaces/ccolas/TastyPiano/src/music/utilities/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ccolas/TastyPiano/src/music/utilities/handcoded_rep_utilities/tht/defaults.py b/spaces/ccolas/TastyPiano/src/music/utilities/handcoded_rep_utilities/tht/defaults.py deleted file mode 100644 index e0d678337432bbcced7d525d0cfa9be43b712f59..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/music/utilities/handcoded_rep_utilities/tht/defaults.py +++ /dev/null @@ -1,21 +0,0 @@ -'''Default configuration for THT''' - -from src.music.utilities.handcoded_rep_utilities.tht import similarity, confidence, correction - -eval_f = confidence.windowed_conf -corr_f = correction.windowed_corr -sim_f = similarity.min_dist_sim -similarity_epsilon = 0.005 -max_delta = (60000.0 / 40) # 40 bpm -min_delta = (60000.0 / 320) # 320 bpm -max_hypotheses = 30 - -config = { - 'eval_f': eval_f, - 'corr_f': corr_f, - 'sim_f': sim_f, - 'similarity_epsilon': similarity_epsilon, - 'max_delta': max_delta, - 'min_delta': min_delta, - 'max_hypotheses': max_hypotheses -} diff --git a/spaces/cfwef/gpt/show_math.py b/spaces/cfwef/gpt/show_math.py deleted file mode 100644 index 80fa881d1c2ace5813f75b5d8a19ca056a8bfa4f..0000000000000000000000000000000000000000 --- a/spaces/cfwef/gpt/show_math.py +++ /dev/null @@ -1,80 +0,0 @@ -# This program is written by: https://github.com/polarwinkel/mdtex2html - -from latex2mathml.converter import convert as tex2mathml -import re - -incomplete = 'formula incomplete' -convError = 'LaTeX-convert-error' - -def convert(mdtex, extensions=[], splitParagraphs=True): - ''' converts recursively the Markdown-LaTeX-mixture to HTML with MathML ''' - found = False - # handle all paragraphs separately (prevents aftereffects) - if splitParagraphs: - parts = re.split("\n\n", mdtex) - result = '' - for part in parts: - result += convert(part, extensions, splitParagraphs=False) - return result - # find first $$-formula: - parts = re.split('\${2}', mdtex, 2) - if len(parts)>1: - found = True - result = convert(parts[0], extensions, splitParagraphs=False)+'\n' - try: - result += '
<div class="blockformula">'+tex2mathml(parts[1])+'</div>\n' - except: - result += '<div class="blockformula">'+convError+'</div>' - if len(parts)==3: - result += convert(parts[2], extensions, splitParagraphs=False) - else: - result += '<div class="blockformula">'+incomplete+'</div>
      ' - # else find first $-formulas: - else: - parts = re.split('\${1}', mdtex, 2) - if len(parts)>1 and not found: - found = True - try: - mathml = tex2mathml(parts[1]) - except: - mathml = convError - if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula! - parts[0]=parts[0]+'​' - if len(parts)==3: - result = convert(parts[0]+mathml+parts[2], extensions, splitParagraphs=False) - else: - result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False) - # else find first \[..\]-equation: - else: - parts = re.split(r'\\\[', mdtex, 1) - if len(parts)>1 and not found: - found = True - result = convert(parts[0], extensions, splitParagraphs=False)+'\n' - parts = re.split(r'\\\]', parts[1], 1) - try: - result += '
<div class="blockformula">'+tex2mathml(parts[0])+'</div>\n' - except: - result += '<div class="blockformula">'+convError+'</div>' - if len(parts)==2: - result += convert(parts[1], extensions, splitParagraphs=False) - else: - result += '<div class="blockformula">'+incomplete+'</div>
      ' - # else find first \(..\)-equation: - else: - parts = re.split(r'\\\(', mdtex, 1) - if len(parts)>1 and not found: - found = True - subp = re.split(r'\\\)', parts[1], 1) - try: - mathml = tex2mathml(subp[0]) - except: - mathml = convError - if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula! - parts[0]=parts[0]+'​' - if len(subp)==2: - result = convert(parts[0]+mathml+subp[1], extensions, splitParagraphs=False) - else: - result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False) - if not found: - result = mdtex - return result diff --git a/spaces/cha0smagick/RPG_Character_generator/README.md b/spaces/cha0smagick/RPG_Character_generator/README.md deleted file mode 100644 index 91743c598df9801fefcf02ed37f5946ff0df5309..0000000000000000000000000000000000000000 --- a/spaces/cha0smagick/RPG_Character_generator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: RPG Character Generator -emoji: 📉 -colorFrom: yellow -colorTo: pink -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/generation/flax_utils.py b/spaces/chendl/compositional_test/transformers/src/transformers/generation/flax_utils.py deleted file mode 100644 index 4ff1164c88e91a4c37c864756d75e6ce74033c48..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/generation/flax_utils.py +++ /dev/null @@ -1,1004 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team. -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import copy -import inspect -import warnings -from functools import partial -from typing import Any, Dict, Optional, Union - -import flax -import jax -import jax.numpy as jnp -import numpy as np -from jax import lax - -from ..models.auto import ( - FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, - FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, - FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, -) -from ..utils import ModelOutput, logging -from .configuration_utils import GenerationConfig -from .flax_logits_process import ( - FlaxForcedBOSTokenLogitsProcessor, - FlaxForcedEOSTokenLogitsProcessor, - FlaxForceTokensLogitsProcessor, - FlaxLogitsProcessorList, - FlaxMinLengthLogitsProcessor, - FlaxSuppressTokensAtBeginLogitsProcessor, - FlaxSuppressTokensLogitsProcessor, - FlaxTemperatureLogitsWarper, - FlaxTopKLogitsWarper, - FlaxTopPLogitsWarper, -) - - -logger = logging.get_logger(__name__) - - -@flax.struct.dataclass -class FlaxGreedySearchOutput(ModelOutput): - """ - Flax Base class for outputs of decoder-only generation models using greedy search. - - - Args: - sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): - The generated sequences. 
- """ - - sequences: jnp.ndarray = None - - -@flax.struct.dataclass -class FlaxSampleOutput(ModelOutput): - """ - Flax Base class for outputs of decoder-only generation models using sampling. - - - Args: - sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): - The generated sequences. - """ - - sequences: jnp.ndarray = None - - -@flax.struct.dataclass -class FlaxBeamSearchOutput(ModelOutput): - """ - Flax Base class for outputs of decoder-only generation models using greedy search. - - - Args: - sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): - The generated sequences. - scores (`jnp.ndarray` of shape `(batch_size,)`): - The scores (log probabilities) of the generated sequences. - """ - - sequences: jnp.ndarray = None - scores: jnp.ndarray = None - - -@flax.struct.dataclass -class GreedyState: - cur_len: jnp.ndarray - sequences: jnp.ndarray - running_token: jnp.ndarray - is_sent_finished: jnp.ndarray - model_kwargs: Dict[str, jnp.ndarray] - - -@flax.struct.dataclass -class SampleState: - cur_len: jnp.ndarray - sequences: jnp.ndarray - running_token: jnp.ndarray - is_sent_finished: jnp.ndarray - prng_key: jnp.ndarray - model_kwargs: Dict[str, jnp.ndarray] - - -@flax.struct.dataclass -class BeamSearchState: - cur_len: jnp.ndarray - running_sequences: jnp.ndarray - running_scores: jnp.ndarray - sequences: jnp.ndarray - scores: jnp.ndarray - is_sent_finished: jnp.ndarray - model_kwargs: Dict[str, jnp.ndarray] - - -class FlaxGenerationMixin: - """ - A class containing all functions for auto-regressive text generation, to be used as a mixin in - [`FlaxPreTrainedModel`]. - - The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for: - - *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and - `do_sample=False` - - *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and - `do_sample=True` - - *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and - `do_sample=False` - - You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To - learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). - """ - - def prepare_inputs_for_generation(self, *args, **kwargs): - raise NotImplementedError( - "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`." - ) - - @staticmethod - def _run_loop_in_debug(cond_fn, body_fn, init_state): - """ - Run generation in untraced mode. This should only be used for debugging purposes. 
- """ - state = init_state - while cond_fn(state): - state = body_fn(state) - return state - - def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs): - encoder_kwargs = { - argument: value - for argument, value in model_kwargs.items() - if not (argument.startswith("decoder_") or argument.startswith("cross_attn")) - } - model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs) - return model_kwargs - - def _prepare_decoder_input_ids_for_generation( - self, - batch_size: int, - decoder_start_token_id: int = None, - bos_token_id: int = None, - model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, - ) -> jnp.ndarray: - if model_kwargs is not None and "decoder_input_ids" in model_kwargs: - # Only use this arg if not None, otherwise just remove from model_kwargs - decoder_input_ids = model_kwargs.pop("decoder_input_ids") - if decoder_input_ids is not None: - return decoder_input_ids - decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) - return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0) - - def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: - # retrieve decoder_start_token_id for encoder-decoder models - # fall back to bos_token_id if necessary - decoder_start_token_id = ( - decoder_start_token_id - if decoder_start_token_id is not None - else self.generation_config.decoder_start_token_id - ) - bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id - if decoder_start_token_id is not None: - return decoder_start_token_id - elif ( - hasattr(self.config, "decoder") - and hasattr(self.config.decoder, "decoder_start_token_id") - and self.config.decoder.decoder_start_token_id is not None - ): - return self.config.decoder.decoder_start_token_id - elif bos_token_id is not None: - return bos_token_id - elif ( - hasattr(self.config, "decoder") - and hasattr(self.config.decoder, "bos_token_id") - and self.config.decoder.bos_token_id is not None - ): - return self.config.decoder.bos_token_id - raise ValueError( - "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." - ) - - @staticmethod - def _expand_to_num_beams(tensor, num_beams): - return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:]) - - def _adapt_logits_for_beam_search(self, logits): - """ - This function can be overwritten in the specific modeling_flax_.py classes to allow for custom beam - search behavior. Note that the only model that overwrites this method is [`~transformes.FlaxMarianMTModel`]. - """ - return logits - - def _validate_model_class(self): - """ - Confirms that the model class is compatible with generation. If not, raises an exception that points to the - right class to use. - """ - if not self.can_generate(): - generate_compatible_mappings = [ - FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, - FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, - FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, - ] - generate_compatible_classes = set() - for model_mapping in generate_compatible_mappings: - supported_models = model_mapping.get(type(self.config), default=None) - if supported_models is not None: - generate_compatible_classes.add(supported_models.__name__) - exception_message = ( - f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " - "it doesn't have a language model head." 
- ) - if generate_compatible_classes: - exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" - raise TypeError(exception_message) - - def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): - """Validates model kwargs for generation. Generate argument typos will also be caught here.""" - unused_model_args = [] - model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) - # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If - # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) - if "kwargs" in model_args or "model_kwargs" in model_args: - model_args |= set(inspect.signature(self.__call__).parameters) - for key, value in model_kwargs.items(): - if value is not None and key not in model_args: - unused_model_args.append(key) - - if unused_model_args: - raise ValueError( - f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" - " generate arguments will also show up in this list)" - ) - - def generate( - self, - input_ids: jnp.ndarray, - generation_config: Optional[GenerationConfig] = None, - prng_key: Optional[jnp.ndarray] = None, - trace: bool = True, - params: Optional[Dict[str, jnp.ndarray]] = None, - logits_processor: Optional[FlaxLogitsProcessorList] = None, - **kwargs, - ): - r""" - Generates sequences of token ids for models with a language modeling head. - - Parameters: - input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): - The sequence used as a prompt for the generation. - generation_config (`~generation.GenerationConfig`, *optional*): - The generation configuration to be used as base parametrization for the generation call. `**kwargs` - passed to generate matching the attributes of `generation_config` will override them. If - `generation_config` is not provided, the default will be used, which had the following loading - priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model - configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s - default values, whose documentation should be checked to parameterize generation. - trace (`bool`, *optional*, defaults to `True`): - Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a - considerably slower runtime. - params (`Dict[str, jnp.ndarray]`, *optional*): - Optionally the model parameters can be passed. Can be useful for parallelized generation. - logits_processor (`FlaxLogitsProcessorList `, *optional*): - Custom logits processors that complement the default logits processors built from arguments and - generation config. If a logit processor is passed that is already created with the arguments or a - generation config an error is thrown. This feature is intended for advanced users. - kwargs: - Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be - forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder - specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. - - Return: - [`~utils.ModelOutput`]. 
- - """ - # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call - self._validate_model_class() - - # priority: `generation_config` argument > `model.generation_config` (the default generation config) - if generation_config is None: - # legacy: users may modify the model configuration to control generation -- update the generation config - # model attribute accordingly, if it was created from the model config - if self.generation_config._from_model_config: - new_generation_config = GenerationConfig.from_model_config(self.config) - if new_generation_config != self.generation_config: - warnings.warn( - "You have modified the pretrained model configuration to control generation. This is a" - " deprecated strategy to control generation and will be removed soon, in a future version." - " Please use a generation configuration file (see" - " https://huggingface.co/docs/transformers/main_classes/text_generation)" - ) - self.generation_config = new_generation_config - generation_config = self.generation_config - - generation_config = copy.deepcopy(generation_config) - model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs - generation_config.validate() - self._validate_model_kwargs(model_kwargs.copy()) - - logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList() - - # set init values - prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) - - if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: - if model_kwargs.get("attention_mask") is None: - logger.warning( - "The attention mask and the pad token id were not set. As a consequence, you may observe " - "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." - ) - eos_token_id = generation_config.eos_token_id - if isinstance(eos_token_id, list): - eos_token_id = eos_token_id[0] - logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") - generation_config.pad_token_id = eos_token_id - - if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder: - raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.") - - # decoder-only models should use left-padding for generation (can't be checked with `trace=True`) - if not self.config.is_encoder_decoder and not trace: - if ( - generation_config.pad_token_id is not None - and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0 - ): - logger.warning( - "A decoder-only architecture is being used, but right-padding was detected! For correct " - "generation results, please set `padding_side='left'` when initializing the tokenizer." - ) - - batch_size = input_ids.shape[0] - - if self.config.is_encoder_decoder: - # add encoder_outputs to model_kwargs - if model_kwargs.get("encoder_outputs") is None: - model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs) - # prepare decoder_input_ids for generation - input_ids = self._prepare_decoder_input_ids_for_generation( - batch_size, - decoder_start_token_id=generation_config.decoder_start_token_id, - bos_token_id=generation_config.bos_token_id, - model_kwargs=model_kwargs, - ) - - # Prepare `max_length` depending on other stopping criteria. 
- input_ids_seq_length = input_ids.shape[-1] - has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None - if has_default_max_length and generation_config.max_new_tokens is None: - warnings.warn( - f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. " - "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we" - " recommend using `max_new_tokens` to control the maximum length of the generation.", - UserWarning, - ) - elif generation_config.max_new_tokens is not None: - generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length - if not has_default_max_length: - logger.warn( - f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" - f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " - "Please refer to the documentation for more information. " - "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)", - UserWarning, - ) - - if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length: - raise ValueError( - f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger than" - f" the maximum length ({generation_config.max_length})" - ) - if input_ids_seq_length >= generation_config.max_length: - input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" - logger.warning( - f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" - f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" - " increasing`max_new_tokens`." - ) - - logits_processor = self._get_logits_processor( - generation_config=generation_config, - input_ids_seq_length=input_ids_seq_length, - logits_processor=logits_processor, - ) - - if not generation_config.do_sample and generation_config.num_beams == 1: - return self._greedy_search( - input_ids, - generation_config.max_length, - generation_config.pad_token_id, - generation_config.eos_token_id, - logits_processor=logits_processor, - trace=trace, - params=params, - model_kwargs=model_kwargs, - ) - elif generation_config.do_sample and generation_config.num_beams == 1: - logits_warper = self._get_logits_warper(generation_config=generation_config) - return self._sample( - input_ids, - generation_config.max_length, - generation_config.pad_token_id, - generation_config.eos_token_id, - prng_key, - logits_warper=logits_warper, - logits_processor=logits_processor, - trace=trace, - params=params, - model_kwargs=model_kwargs, - ) - elif not generation_config.do_sample and generation_config.num_beams > 1: - # broadcast input_ids & encoder_outputs - input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams) - - if "encoder_outputs" in model_kwargs: - model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams( - model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams - ) - - for kwarg in ["attention_mask", "decoder_attention_mask"]: - if kwarg in model_kwargs: - model_kwargs[kwarg] = self._expand_to_num_beams( - model_kwargs[kwarg], num_beams=generation_config.num_beams - ) - - return self._beam_search( - input_ids, - generation_config.max_length, - generation_config.pad_token_id, - generation_config.eos_token_id, - length_penalty=generation_config.length_penalty, - 
early_stopping=generation_config.early_stopping, - logits_processor=logits_processor, - trace=trace, - params=params, - model_kwargs=model_kwargs, - ) - else: - raise NotImplementedError("`Beam sampling is currently not implemented.") - - def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList: - """ - This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsWarper`] - instances used for multinomial sampling. - """ - warpers = FlaxLogitsProcessorList() - - if generation_config.temperature is not None and generation_config.temperature != 1.0: - warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature)) - if generation_config.top_k is not None and generation_config.top_k != 0: - warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1)) - if generation_config.top_p is not None and generation_config.top_p < 1.0: - warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1)) - - return warpers - - def _get_logits_processor( - self, - generation_config: GenerationConfig, - input_ids_seq_length: int, - logits_processor: Optional[FlaxLogitsProcessorList], - ) -> FlaxLogitsProcessorList: - """ - This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsProcessor`] - instances used to modify the scores of the language model head. - """ - processors = FlaxLogitsProcessorList() - - if ( - generation_config.min_length is not None - and generation_config.eos_token_id is not None - and generation_config.min_length > -1 - ): - processors.append( - FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id) - ) - if generation_config.forced_bos_token_id is not None: - processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) - if generation_config.forced_eos_token_id is not None: - processors.append( - FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) - ) - if generation_config.suppress_tokens is not None: - processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens)) - if generation_config.begin_suppress_tokens is not None: - begin_index = input_ids_seq_length - begin_index = ( - begin_index - if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) - else begin_index + 1 - ) - if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0: - # generation starts after the last token that is forced - begin_index += generation_config.forced_decoder_ids[-1][0] - processors.append( - FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) - ) - if generation_config.forced_decoder_ids is not None: - forced_decoder_ids = [ - [input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids - ] - processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids)) - processors = self._merge_criteria_processor_list(processors, logits_processor) - - return processors - - def _merge_criteria_processor_list( - self, - default_list: FlaxLogitsProcessorList, - custom_list: FlaxLogitsProcessorList, - ) -> FlaxLogitsProcessorList: - if len(custom_list) == 0: - return default_list - for default in default_list: - for custom in custom_list: - if type(custom) is type(default): - object_type = "logits processor" - raise ValueError( - f"A custom {object_type} of type 
{type(custom)} with values {custom} has been passed to" - f" `generate`, but it has already been created with the values {default}. {default} has been" - " created by passing the corresponding arguments to generate or by the model's config default" - f" values. If you just want to change the default values of {object_type} consider passing" - f" them as arguments to `generate` instead of using a custom {object_type}." - ) - default_list.extend(custom_list) - return default_list - - def _greedy_search( - self, - input_ids: None, - max_length: Optional[int] = None, - pad_token_id: Optional[int] = None, - eos_token_id: Optional[int] = None, - logits_processor: Optional[FlaxLogitsProcessorList] = None, - trace: bool = True, - params: Optional[Dict[str, jnp.ndarray]] = None, - model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, - ): - # init values - max_length = max_length if max_length is not None else self.generation_config.max_length - pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id - eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id - - batch_size, cur_len = input_ids.shape - - eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) - pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) - cur_len = jnp.array(cur_len) - - # per batch-item holding current token in loop. - sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32) - sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0)) - - # per batch-item state bit indicating if sentence has finished. - is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_) - - # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop - # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. - model = self.decode if self.config.is_encoder_decoder else self - # initialize model specific kwargs - model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs) - - # initialize state - state = GreedyState( - cur_len=cur_len, - sequences=sequences, - running_token=input_ids, - is_sent_finished=is_sent_finished, - model_kwargs=model_kwargs, - ) - - def greedy_search_cond_fn(state): - """state termination condition fn.""" - has_reached_max_length = state.cur_len == max_length - all_sequence_finished = jnp.all(state.is_sent_finished) - finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished) - return ~finish_generation - - def greedy_search_body_fn(state): - """state update fn.""" - model_outputs = model(state.running_token, params=params, **state.model_kwargs) - logits = model_outputs.logits[:, -1] - - # apply min_length, ... 
- logits = logits_processor(state.sequences, logits, state.cur_len) - - next_token = jnp.argmax(logits, axis=-1) - - next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished - next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id) - next_token = next_token[:, None] - - next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len)) - next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) - return GreedyState( - cur_len=state.cur_len + 1, - sequences=next_sequences, - running_token=next_token, - is_sent_finished=next_is_sent_finished, - model_kwargs=next_model_kwargs, - ) - - # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU - if input_ids.shape[1] > 1: - state = greedy_search_body_fn(state) - - if not trace: - state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state) - else: - state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state) - - return FlaxGreedySearchOutput(sequences=state.sequences) - - def _sample( - self, - input_ids: None, - max_length: Optional[int] = None, - pad_token_id: Optional[int] = None, - eos_token_id: Optional[int] = None, - prng_key: Optional[jnp.ndarray] = None, - logits_processor: Optional[FlaxLogitsProcessorList] = None, - logits_warper: Optional[FlaxLogitsProcessorList] = None, - trace: bool = True, - params: Optional[Dict[str, jnp.ndarray]] = None, - model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, - ): - # init values - max_length = max_length if max_length is not None else self.generation_config.max_length - pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id - eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id - prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) - - batch_size, cur_len = input_ids.shape - - eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) - pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) - cur_len = jnp.array(cur_len) - - # per batch-item holding current token in loop. - sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32) - sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0)) - - # per batch-item state bit indicating if sentence has finished. - is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_) - - # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop - # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. 
- model = self.decode if self.config.is_encoder_decoder else self - - # initialize model specific kwargs - model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs) - - # initialize state - state = SampleState( - cur_len=cur_len, - sequences=sequences, - running_token=input_ids, - is_sent_finished=is_sent_finished, - prng_key=prng_key, - model_kwargs=model_kwargs, - ) - - def sample_search_cond_fn(state): - """state termination condition fn.""" - has_reached_max_length = state.cur_len == max_length - all_sequence_finished = jnp.all(state.is_sent_finished) - finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished) - return ~finish_generation - - def sample_search_body_fn(state): - """state update fn.""" - prng_key, prng_key_next = jax.random.split(state.prng_key) - model_outputs = model(state.running_token, params=params, **state.model_kwargs) - - logits = model_outputs.logits[:, -1] - - # apply min_length, ... - logits = logits_processor(state.sequences, logits, state.cur_len) - # apply top_p, top_k, temperature - logits = logits_warper(logits, logits, state.cur_len) - - next_token = jax.random.categorical(prng_key, logits, axis=-1) - - next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id) - next_token = next_token * ~next_is_sent_finished + pad_token_id * next_is_sent_finished - next_token = next_token[:, None] - - next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len)) - next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) - - return SampleState( - cur_len=state.cur_len + 1, - sequences=next_sequences, - running_token=next_token, - is_sent_finished=next_is_sent_finished, - model_kwargs=next_model_kwargs, - prng_key=prng_key_next, - ) - - # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU - if input_ids.shape[1] > 1: - state = sample_search_body_fn(state) - - if not trace: - state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state) - else: - state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state) - - return FlaxSampleOutput(sequences=state.sequences) - - def _beam_search( - self, - input_ids: None, - max_length: Optional[int] = None, - pad_token_id: Optional[int] = None, - eos_token_id: Optional[int] = None, - length_penalty: Optional[float] = None, - early_stopping: Optional[Union[bool, str]] = None, - logits_processor: Optional[FlaxLogitsProcessorList] = None, - trace: bool = True, - params: Optional[Dict[str, jnp.ndarray]] = None, - model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, - ): - """ - This beam search function is heavily inspired by Flax's official example: - https://github.com/google/flax/blob/main/examples/wmt/decode.py - """ - - def flatten_beam_dim(tensor): - """Flattens the first two dimensions of a non-scalar array.""" - # ignore scalars (e.g. cache index) - if tensor.ndim == 0: - return tensor - return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:]) - - def unflatten_beam_dim(tensor, batch_size, num_beams): - """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" - # ignore scalars (e.g. cache index) - if tensor.ndim == 0: - return tensor - return tensor.reshape((batch_size, num_beams) + tensor.shape[1:]) - - def gather_beams(nested, beam_indices, batch_size, new_num_beams): - """ - Gathers the beam slices indexed by beam_indices into new beam array. 
- """ - batch_indices = jnp.reshape( - jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams) - ) - - def gather_fn(tensor): - # ignore scalars (e.g. cache index) - if tensor.ndim == 0: - return tensor - else: - return tensor[batch_indices, beam_indices] - - return jax.tree_util.tree_map(gather_fn, nested) - - # init values - max_length = max_length if max_length is not None else self.generation_config.max_length - pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id - eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id - length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty - early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping - - batch_size, num_beams, cur_len = input_ids.shape - - eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) - pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) - cur_len = jnp.array(cur_len) - - # per batch,beam-item holding current token in loop. - sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32) - running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32) - running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0)) - - # per batch,beam-item state bit indicating if sentence has finished. - is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_) - - # per batch,beam-item score, logprobs - running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1]) - scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7) - - # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop - # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. - model = self.decode if self.config.is_encoder_decoder else self - - # flatten beam dim - if "encoder_outputs" in model_kwargs: - model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim( - model_kwargs["encoder_outputs"]["last_hidden_state"] - ) - for kwarg in ["attention_mask", "decoder_attention_mask"]: - if kwarg in model_kwargs: - model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg]) - - # initialize model specific kwargs - model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs) - - # initialize state - state = BeamSearchState( - cur_len=cur_len, - running_sequences=running_sequences, - running_scores=running_scores, - sequences=sequences, - scores=scores, - is_sent_finished=is_sent_finished, - model_kwargs=model_kwargs, - ) - - def beam_search_cond_fn(state): - """beam search state termination condition fn.""" - - # 1. is less than max length? - not_max_length_yet = state.cur_len < max_length - - # 2. can the new beams still improve? - # early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion - # below for more details. - # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 - # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of - # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there. 
- if early_stopping == "never" and length_penalty > 0.0: - best_running_score = state.running_scores[:, :1] / (max_length**length_penalty) - else: - best_running_score = state.running_scores[:, :1] / (state.cur_len**length_penalty) - worst_finished_score = jnp.where( - state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7) - ) - improvement_still_possible = jnp.any(best_running_score > worst_finished_score) - - # 3. is there still a beam that has not finished? - still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True)) - - return not_max_length_yet & still_open_beam & improvement_still_possible - - def beam_search_body_fn(state, input_ids_length=1): - """beam search state update fn.""" - # 1. Forward current tokens - # Collect the current position slice along length to feed the fast - # autoregressive decoder model. Flatten the beam dimension into batch - # dimension for feeding into the model. - # unflatten beam dimension - # Unflatten beam dimension in attention cache arrays - input_token = flatten_beam_dim( - lax.dynamic_slice( - state.running_sequences, - (0, 0, state.cur_len - input_ids_length), - (batch_size, num_beams, input_ids_length), - ) - ) - model_outputs = model(input_token, params=params, **state.model_kwargs) - - logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams) - cache = jax.tree_util.tree_map( - lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values - ) - - # adapt logits for FlaxMarianMTModel - logits = self._adapt_logits_for_beam_search(logits) - - # 2. Compute log probs - # get log probabilities from logits, - # process logits with processors (*e.g.* min_length, ...), and - # add new logprobs to existing running logprobs scores. - log_probs = jax.nn.log_softmax(logits) - log_probs = logits_processor( - flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), state.cur_len - ) - log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams) - log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2) - vocab_size = log_probs.shape[2] - log_probs = log_probs.reshape((batch_size, num_beams * vocab_size)) - - # 3. Retrieve top-K - # Each item in batch has num_beams * vocab_size candidate sequences. - # For each item, get the top 2*k candidates with the highest log- - # probabilities. We gather the top 2*K beams here so that even if the best - # K sequences reach EOS simultaneously, we have another K sequences - # remaining to continue the live beam search. - # Gather the top 2*K scores from _all_ beams. - # Gather 2*k top beams. - # Recover the beam index by floor division. - # Recover token id by modulo division and expand Id array for broadcasting. - # Update sequences for the 2*K top-k new sequences. - beams_to_keep = 2 * num_beams - topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep) - topk_beam_indices = topk_indices // vocab_size - topk_running_sequences = gather_beams( - state.running_sequences, topk_beam_indices, batch_size, beams_to_keep - ) - topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2) - topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len)) - - # 4. Check which sequences have ended - # Update current sequences: - # Did any of these sequences reach an end marker? 
- # To prevent these just finished sequences from being added to the current sequences - # set of active beam search sequences, set their log probs to a very large - # negative value. - did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id - running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7) - # 5. Get running sequences scores for next - # Determine the top k beam indices (from top 2*k beams) from log probs - # and gather top k beams (from top 2*k beams). - next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1] - next_running_sequences, next_running_scores = gather_beams( - [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams - ) - - # 6. Process topk logits - # Further process log probs: - # - add length penalty - # - make sure no scores can be added anymore if beam is full - # - make sure still running sequences cannot be chosen as finalized beam - topk_log_probs = topk_log_probs / (state.cur_len**length_penalty) - beams_in_batch_are_full = jnp.broadcast_to( - state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape - ) & (early_stopping is True) - add_penalty = ~did_topk_just_finished | beams_in_batch_are_full - topk_log_probs += add_penalty * np.array(-1.0e7) - - # 7. Get scores, sequences, is sentence finished for next. - # Combine sequences, scores, and flags along the beam dimension and compare - # new finished sequence scores to existing finished scores and select the - # best from the new set of beams - merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1) - merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1) - merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1) - topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1] - next_sequences, next_scores, next_is_sent_finished = gather_beams( - [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams - ) - - # 8. Update model kwargs. - # Determine the top k beam indices from the original set of all beams. - # With these, gather the top k beam-associated caches. - next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams) - next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams) - model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache) - next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) - - return BeamSearchState( - cur_len=state.cur_len + 1, - running_scores=next_running_scores, - running_sequences=next_running_sequences, - scores=next_scores, - sequences=next_sequences, - is_sent_finished=next_is_sent_finished, - model_kwargs=next_model_kwargs, - ) - - # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU - if input_ids.shape[-1] > 1: - state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state) - - if not trace: - state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state) - else: - state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state) - - # Account for the edge-case where there are no finished sequences for a - # particular batch item. If so, return running sequences for that batch item. 
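# A minimal sketch of the fallback implemented just below, with toy shapes (assumed):
import jax.numpy as jnp

is_sent_finished = jnp.array([[True, False], [False, False]])    # (batch, beams)
finalized = jnp.zeros((2, 2, 4), dtype=jnp.int32)                # stands in for state.sequences
running = jnp.ones((2, 2, 4), dtype=jnp.int32)                   # stands in for state.running_sequences

any_finished = jnp.any(is_sent_finished, axis=1)                 # (batch,)
chosen = jnp.where(any_finished[:, None, None], finalized, running)
print(chosen[0, 0], chosen[1, 0])   # batch 0 keeps finalized beams, batch 1 falls back to running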
- none_finished = jnp.any(state.is_sent_finished, axis=1) - sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences) - scores = jnp.where(none_finished[:, None], state.scores, state.running_scores) - - # take best beam for each batch - sequences = sequences[:, 0] - scores = scores[:, 0] - - return FlaxBeamSearchOutput(sequences=sequences, scores=scores) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImImagePlugin.py deleted file mode 100644 index 746743f658cf3fa2e0022ae049808eb68d3d1221..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImImagePlugin.py +++ /dev/null @@ -1,371 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# IFUNC IM file handling for PIL -# -# history: -# 1995-09-01 fl Created. -# 1997-01-03 fl Save palette images -# 1997-01-08 fl Added sequence support -# 1997-01-23 fl Added P and RGB save support -# 1997-05-31 fl Read floating point images -# 1997-06-22 fl Save floating point images -# 1997-08-27 fl Read and save 1-bit images -# 1998-06-25 fl Added support for RGB+LUT images -# 1998-07-02 fl Added support for YCC images -# 1998-07-15 fl Renamed offset attribute to avoid name clash -# 1998-12-29 fl Added I;16 support -# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) -# 2003-09-26 fl Added LA/PA support -# -# Copyright (c) 1997-2003 by Secret Labs AB. -# Copyright (c) 1995-2001 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# - - -import os -import re - -from . import Image, ImageFile, ImagePalette - -# -------------------------------------------------------------------- -# Standard tags - -COMMENT = "Comment" -DATE = "Date" -EQUIPMENT = "Digitalization equipment" -FRAMES = "File size (no of images)" -LUT = "Lut" -NAME = "Name" -SCALE = "Scale (x,y)" -SIZE = "Image size (x*y)" -MODE = "Image type" - -TAGS = { - COMMENT: 0, - DATE: 0, - EQUIPMENT: 0, - FRAMES: 0, - LUT: 0, - NAME: 0, - SCALE: 0, - SIZE: 0, - MODE: 0, -} - -OPEN = { - # ifunc93/p3cfunc formats - "0 1 image": ("1", "1"), - "L 1 image": ("1", "1"), - "Greyscale image": ("L", "L"), - "Grayscale image": ("L", "L"), - "RGB image": ("RGB", "RGB;L"), - "RLB image": ("RGB", "RLB"), - "RYB image": ("RGB", "RLB"), - "B1 image": ("1", "1"), - "B2 image": ("P", "P;2"), - "B4 image": ("P", "P;4"), - "X 24 image": ("RGB", "RGB"), - "L 32 S image": ("I", "I;32"), - "L 32 F image": ("F", "F;32"), - # old p3cfunc formats - "RGB3 image": ("RGB", "RGB;T"), - "RYB3 image": ("RGB", "RYB;T"), - # extensions - "LA image": ("LA", "LA;L"), - "PA image": ("LA", "PA;L"), - "RGBA image": ("RGBA", "RGBA;L"), - "RGBX image": ("RGBX", "RGBX;L"), - "CMYK image": ("CMYK", "CMYK;L"), - "YCC image": ("YCbCr", "YCbCr;L"), -} - -# ifunc95 extensions -for i in ["8", "8S", "16", "16S", "32", "32F"]: - OPEN[f"L {i} image"] = ("F", f"F;{i}") - OPEN[f"L*{i} image"] = ("F", f"F;{i}") -for i in ["16", "16L", "16B"]: - OPEN[f"L {i} image"] = (f"I;{i}", f"I;{i}") - OPEN[f"L*{i} image"] = (f"I;{i}", f"I;{i}") -for i in ["32S"]: - OPEN[f"L {i} image"] = ("I", f"I;{i}") - OPEN[f"L*{i} image"] = ("I", f"I;{i}") -for i in range(2, 33): - OPEN[f"L*{i} image"] = ("F", f"F;{i}") - - -# -------------------------------------------------------------------- -# Read IM directory - -split = re.compile(rb"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$") - - -def 
number(s): - try: - return int(s) - except ValueError: - return float(s) - - -## -# Image plugin for the IFUNC IM file format. - - -class ImImageFile(ImageFile.ImageFile): - format = "IM" - format_description = "IFUNC Image Memory" - _close_exclusive_fp_after_loading = False - - def _open(self): - # Quick rejection: if there's not an LF among the first - # 100 bytes, this is (probably) not a text header. - - if b"\n" not in self.fp.read(100): - msg = "not an IM file" - raise SyntaxError(msg) - self.fp.seek(0) - - n = 0 - - # Default values - self.info[MODE] = "L" - self.info[SIZE] = (512, 512) - self.info[FRAMES] = 1 - - self.rawmode = "L" - - while True: - s = self.fp.read(1) - - # Some versions of IFUNC uses \n\r instead of \r\n... - if s == b"\r": - continue - - if not s or s == b"\0" or s == b"\x1A": - break - - # FIXME: this may read whole file if not a text file - s = s + self.fp.readline() - - if len(s) > 100: - msg = "not an IM file" - raise SyntaxError(msg) - - if s[-2:] == b"\r\n": - s = s[:-2] - elif s[-1:] == b"\n": - s = s[:-1] - - try: - m = split.match(s) - except re.error as e: - msg = "not an IM file" - raise SyntaxError(msg) from e - - if m: - k, v = m.group(1, 2) - - # Don't know if this is the correct encoding, - # but a decent guess (I guess) - k = k.decode("latin-1", "replace") - v = v.decode("latin-1", "replace") - - # Convert value as appropriate - if k in [FRAMES, SCALE, SIZE]: - v = v.replace("*", ",") - v = tuple(map(number, v.split(","))) - if len(v) == 1: - v = v[0] - elif k == MODE and v in OPEN: - v, self.rawmode = OPEN[v] - - # Add to dictionary. Note that COMMENT tags are - # combined into a list of strings. - if k == COMMENT: - if k in self.info: - self.info[k].append(v) - else: - self.info[k] = [v] - else: - self.info[k] = v - - if k in TAGS: - n += 1 - - else: - msg = "Syntax error in IM header: " + s.decode("ascii", "replace") - raise SyntaxError(msg) - - if not n: - msg = "Not an IM file" - raise SyntaxError(msg) - - # Basic attributes - self._size = self.info[SIZE] - self.mode = self.info[MODE] - - # Skip forward to start of image data - while s and s[:1] != b"\x1A": - s = self.fp.read(1) - if not s: - msg = "File truncated" - raise SyntaxError(msg) - - if LUT in self.info: - # convert lookup table to palette or lut attribute - palette = self.fp.read(768) - greyscale = 1 # greyscale palette - linear = 1 # linear greyscale palette - for i in range(256): - if palette[i] == palette[i + 256] == palette[i + 512]: - if palette[i] != i: - linear = 0 - else: - greyscale = 0 - if self.mode in ["L", "LA", "P", "PA"]: - if greyscale: - if not linear: - self.lut = list(palette[:256]) - else: - if self.mode in ["L", "P"]: - self.mode = self.rawmode = "P" - elif self.mode in ["LA", "PA"]: - self.mode = "PA" - self.rawmode = "PA;L" - self.palette = ImagePalette.raw("RGB;L", palette) - elif self.mode == "RGB": - if not greyscale or not linear: - self.lut = list(palette) - - self.frame = 0 - - self.__offset = offs = self.fp.tell() - - self._fp = self.fp # FIXME: hack - - if self.rawmode[:2] == "F;": - # ifunc95 formats - try: - # use bit decoder (if necessary) - bits = int(self.rawmode[2:]) - if bits not in [8, 16, 32]: - self.tile = [("bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1))] - return - except ValueError: - pass - - if self.rawmode in ["RGB;T", "RYB;T"]: - # Old LabEye/3PC files. 
Would be very surprised if anyone - # ever stumbled upon such a file ;-) - size = self.size[0] * self.size[1] - self.tile = [ - ("raw", (0, 0) + self.size, offs, ("G", 0, -1)), - ("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)), - ("raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1)), - ] - else: - # LabEye/IFUNC files - self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))] - - @property - def n_frames(self): - return self.info[FRAMES] - - @property - def is_animated(self): - return self.info[FRAMES] > 1 - - def seek(self, frame): - if not self._seek_check(frame): - return - - self.frame = frame - - if self.mode == "1": - bits = 1 - else: - bits = 8 * len(self.mode) - - size = ((self.size[0] * bits + 7) // 8) * self.size[1] - offs = self.__offset + frame * size - - self.fp = self._fp - - self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))] - - def tell(self): - return self.frame - - -# -# -------------------------------------------------------------------- -# Save IM files - - -SAVE = { - # mode: (im type, raw mode) - "1": ("0 1", "1"), - "L": ("Greyscale", "L"), - "LA": ("LA", "LA;L"), - "P": ("Greyscale", "P"), - "PA": ("LA", "PA;L"), - "I": ("L 32S", "I;32S"), - "I;16": ("L 16", "I;16"), - "I;16L": ("L 16L", "I;16L"), - "I;16B": ("L 16B", "I;16B"), - "F": ("L 32F", "F;32F"), - "RGB": ("RGB", "RGB;L"), - "RGBA": ("RGBA", "RGBA;L"), - "RGBX": ("RGBX", "RGBX;L"), - "CMYK": ("CMYK", "CMYK;L"), - "YCbCr": ("YCC", "YCbCr;L"), -} - - -def _save(im, fp, filename): - try: - image_type, rawmode = SAVE[im.mode] - except KeyError as e: - msg = f"Cannot save {im.mode} images as IM" - raise ValueError(msg) from e - - frames = im.encoderinfo.get("frames", 1) - - fp.write(f"Image type: {image_type} image\r\n".encode("ascii")) - if filename: - # Each line must be 100 characters or less, - # or: SyntaxError("not an IM file") - # 8 characters are used for "Name: " and "\r\n" - # Keep just the filename, ditch the potentially overlong path - name, ext = os.path.splitext(os.path.basename(filename)) - name = "".join([name[: 92 - len(ext)], ext]) - - fp.write(f"Name: {name}\r\n".encode("ascii")) - fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode("ascii")) - fp.write(f"File size (no of images): {frames}\r\n".encode("ascii")) - if im.mode in ["P", "PA"]: - fp.write(b"Lut: 1\r\n") - fp.write(b"\000" * (511 - fp.tell()) + b"\032") - if im.mode in ["P", "PA"]: - im_palette = im.im.getpalette("RGB", "RGB;L") - colors = len(im_palette) // 3 - palette = b"" - for i in range(3): - palette += im_palette[colors * i : colors * (i + 1)] - palette += b"\x00" * (256 - colors) - fp.write(palette) # 768 bytes - ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))]) - - -# -# -------------------------------------------------------------------- -# Registry - - -Image.register_open(ImImageFile.format, ImImageFile) -Image.register_save(ImImageFile.format, _save) - -Image.register_extension(ImImageFile.format, ".im") diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_o_s_t.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_o_s_t.py deleted file mode 100644 index dba637117a0ac148af65c75853dd3bffbbbd1154..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_o_s_t.py +++ /dev/null @@ -1,308 +0,0 @@ -from fontTools import ttLib -from 
fontTools.ttLib.standardGlyphOrder import standardGlyphOrder -from fontTools.misc import sstruct -from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex -from . import DefaultTable -import sys -import struct -import array -import logging - -log = logging.getLogger(__name__) - -postFormat = """ - > - formatType: 16.16F - italicAngle: 16.16F # italic angle in degrees - underlinePosition: h - underlineThickness: h - isFixedPitch: L - minMemType42: L # minimum memory if TrueType font is downloaded - maxMemType42: L # maximum memory if TrueType font is downloaded - minMemType1: L # minimum memory if Type1 font is downloaded - maxMemType1: L # maximum memory if Type1 font is downloaded -""" - -postFormatSize = sstruct.calcsize(postFormat) - - -class table__p_o_s_t(DefaultTable.DefaultTable): - def decompile(self, data, ttFont): - sstruct.unpack(postFormat, data[:postFormatSize], self) - data = data[postFormatSize:] - if self.formatType == 1.0: - self.decode_format_1_0(data, ttFont) - elif self.formatType == 2.0: - self.decode_format_2_0(data, ttFont) - elif self.formatType == 3.0: - self.decode_format_3_0(data, ttFont) - elif self.formatType == 4.0: - self.decode_format_4_0(data, ttFont) - else: - # supported format - raise ttLib.TTLibError( - "'post' table format %f not supported" % self.formatType - ) - - def compile(self, ttFont): - data = sstruct.pack(postFormat, self) - if self.formatType == 1.0: - pass # we're done - elif self.formatType == 2.0: - data = data + self.encode_format_2_0(ttFont) - elif self.formatType == 3.0: - pass # we're done - elif self.formatType == 4.0: - data = data + self.encode_format_4_0(ttFont) - else: - # supported format - raise ttLib.TTLibError( - "'post' table format %f not supported" % self.formatType - ) - return data - - def getGlyphOrder(self): - """This function will get called by a ttLib.TTFont instance. - Do not call this function yourself, use TTFont().getGlyphOrder() - or its relatives instead! - """ - if not hasattr(self, "glyphOrder"): - raise ttLib.TTLibError("illegal use of getGlyphOrder()") - glyphOrder = self.glyphOrder - del self.glyphOrder - return glyphOrder - - def decode_format_1_0(self, data, ttFont): - self.glyphOrder = standardGlyphOrder[: ttFont["maxp"].numGlyphs] - - def decode_format_2_0(self, data, ttFont): - (numGlyphs,) = struct.unpack(">H", data[:2]) - numGlyphs = int(numGlyphs) - if numGlyphs > ttFont["maxp"].numGlyphs: - # Assume the numGlyphs field is bogus, so sync with maxp. - # I've seen this in one font, and if the assumption is - # wrong elsewhere, well, so be it: it's hard enough to - # work around _one_ non-conforming post format... 
- numGlyphs = ttFont["maxp"].numGlyphs - data = data[2:] - indices = array.array("H") - indices.frombytes(data[: 2 * numGlyphs]) - if sys.byteorder != "big": - indices.byteswap() - data = data[2 * numGlyphs :] - maxIndex = max(indices) - self.extraNames = extraNames = unpackPStrings(data, maxIndex - 257) - self.glyphOrder = glyphOrder = [""] * int(ttFont["maxp"].numGlyphs) - for glyphID in range(numGlyphs): - index = indices[glyphID] - if index > 257: - try: - name = extraNames[index - 258] - except IndexError: - name = "" - else: - # fetch names from standard list - name = standardGlyphOrder[index] - glyphOrder[glyphID] = name - self.build_psNameMapping(ttFont) - - def build_psNameMapping(self, ttFont): - mapping = {} - allNames = {} - for i in range(ttFont["maxp"].numGlyphs): - glyphName = psName = self.glyphOrder[i] - if glyphName == "": - glyphName = "glyph%.5d" % i - if glyphName in allNames: - # make up a new glyphName that's unique - n = allNames[glyphName] - while (glyphName + "#" + str(n)) in allNames: - n += 1 - allNames[glyphName] = n + 1 - glyphName = glyphName + "#" + str(n) - - self.glyphOrder[i] = glyphName - allNames[glyphName] = 1 - if glyphName != psName: - mapping[glyphName] = psName - - self.mapping = mapping - - def decode_format_3_0(self, data, ttFont): - # Setting self.glyphOrder to None will cause the TTFont object - # try and construct glyph names from a Unicode cmap table. - self.glyphOrder = None - - def decode_format_4_0(self, data, ttFont): - from fontTools import agl - - numGlyphs = ttFont["maxp"].numGlyphs - indices = array.array("H") - indices.frombytes(data) - if sys.byteorder != "big": - indices.byteswap() - # In some older fonts, the size of the post table doesn't match - # the number of glyphs. Sometimes it's bigger, sometimes smaller. 
- self.glyphOrder = glyphOrder = [""] * int(numGlyphs) - for i in range(min(len(indices), numGlyphs)): - if indices[i] == 0xFFFF: - self.glyphOrder[i] = "" - elif indices[i] in agl.UV2AGL: - self.glyphOrder[i] = agl.UV2AGL[indices[i]] - else: - self.glyphOrder[i] = "uni%04X" % indices[i] - self.build_psNameMapping(ttFont) - - def encode_format_2_0(self, ttFont): - numGlyphs = ttFont["maxp"].numGlyphs - glyphOrder = ttFont.getGlyphOrder() - assert len(glyphOrder) == numGlyphs - indices = array.array("H") - extraDict = {} - extraNames = self.extraNames = [ - n for n in self.extraNames if n not in standardGlyphOrder - ] - for i in range(len(extraNames)): - extraDict[extraNames[i]] = i - for glyphID in range(numGlyphs): - glyphName = glyphOrder[glyphID] - if glyphName in self.mapping: - psName = self.mapping[glyphName] - else: - psName = glyphName - if psName in extraDict: - index = 258 + extraDict[psName] - elif psName in standardGlyphOrder: - index = standardGlyphOrder.index(psName) - else: - index = 258 + len(extraNames) - extraDict[psName] = len(extraNames) - extraNames.append(psName) - indices.append(index) - if sys.byteorder != "big": - indices.byteswap() - return ( - struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames) - ) - - def encode_format_4_0(self, ttFont): - from fontTools import agl - - numGlyphs = ttFont["maxp"].numGlyphs - glyphOrder = ttFont.getGlyphOrder() - assert len(glyphOrder) == numGlyphs - indices = array.array("H") - for glyphID in glyphOrder: - glyphID = glyphID.split("#")[0] - if glyphID in agl.AGL2UV: - indices.append(agl.AGL2UV[glyphID]) - elif len(glyphID) == 7 and glyphID[:3] == "uni": - indices.append(int(glyphID[3:], 16)) - else: - indices.append(0xFFFF) - if sys.byteorder != "big": - indices.byteswap() - return indices.tobytes() - - def toXML(self, writer, ttFont): - formatstring, names, fixes = sstruct.getformat(postFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - if hasattr(self, "mapping"): - writer.begintag("psNames") - writer.newline() - writer.comment( - "This file uses unique glyph names based on the information\n" - "found in the 'post' table. Since these names might not be unique,\n" - "we have to invent artificial names in case of clashes. In order to\n" - "be able to retain the original information, we need a name to\n" - "ps name mapping for those cases where they differ. 
That's what\n" - "you see below.\n" - ) - writer.newline() - items = sorted(self.mapping.items()) - for name, psName in items: - writer.simpletag("psName", name=name, psName=psName) - writer.newline() - writer.endtag("psNames") - writer.newline() - if hasattr(self, "extraNames"): - writer.begintag("extraNames") - writer.newline() - writer.comment( - "following are the name that are not taken from the standard Mac glyph order" - ) - writer.newline() - for name in self.extraNames: - writer.simpletag("psName", name=name) - writer.newline() - writer.endtag("extraNames") - writer.newline() - if hasattr(self, "data"): - writer.begintag("hexdata") - writer.newline() - writer.dumphex(self.data) - writer.endtag("hexdata") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name not in ("psNames", "extraNames", "hexdata"): - setattr(self, name, safeEval(attrs["value"])) - elif name == "psNames": - self.mapping = {} - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == "psName": - self.mapping[attrs["name"]] = attrs["psName"] - elif name == "extraNames": - self.extraNames = [] - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == "psName": - self.extraNames.append(attrs["name"]) - else: - self.data = readHex(content) - - -def unpackPStrings(data, n): - # extract n Pascal strings from data. - # if there is not enough data, use "" - - strings = [] - index = 0 - dataLen = len(data) - - for _ in range(n): - if dataLen <= index: - length = 0 - else: - length = byteord(data[index]) - index += 1 - - if dataLen <= index + length - 1: - name = "" - else: - name = tostr(data[index : index + length], encoding="latin1") - strings.append(name) - index += length - - if index < dataLen: - log.warning("%d extra bytes in post.stringData array", dataLen - index) - - elif dataLen < index: - log.warning("not enough data in post.stringData array") - - return strings - - -def packPStrings(strings): - data = b"" - for s in strings: - data = data + bytechr(len(s)) + tobytes(s, encoding="latin1") - return data diff --git a/spaces/cihyFjudo/fairness-paper-search/Aashiqui 2 movie download blu-ray movies for free Enjoy the romantic musical drama in HD quality.md b/spaces/cihyFjudo/fairness-paper-search/Aashiqui 2 movie download blu-ray movies for free Enjoy the romantic musical drama in HD quality.md deleted file mode 100644 index 1e13eb5819af49717b3e8f41ed12e5d0ef54b00a..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Aashiqui 2 movie download blu-ray movies for free Enjoy the romantic musical drama in HD quality.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Aashiqui 2 movie download blu-ray movies for free


      Download >>> https://tinurli.com/2uwkHb



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/cihyFjudo/fairness-paper-search/Achaneyanenikkishtam Malayalam Movie Songs Downloadl Watch Mohanlals Guest Appearance and Sing Along.md b/spaces/cihyFjudo/fairness-paper-search/Achaneyanenikkishtam Malayalam Movie Songs Downloadl Watch Mohanlals Guest Appearance and Sing Along.md deleted file mode 100644 index b6fe602d0ef8abe33968e01541bf12b2f5f8f3b3..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Achaneyanenikkishtam Malayalam Movie Songs Downloadl Watch Mohanlals Guest Appearance and Sing Along.md +++ /dev/null @@ -1,5 +0,0 @@ - -

      Malayalam song download, Here you can malayalam song download and listen free in high quality and all superhit malayalam songs of fresh and legendary artists available here. You can also explore a to z malayalam songs free, latest malayalam song new

      -

      Achaneyanenikkishtam Malayalam Movie Songs Downloadl


      Download Zip ✶✶✶ https://tinurli.com/2uwkle



      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Myob accountright premier v19 crack rar travaillez en ligne ou hors ligne collaborez avec votre quipe et votre comptable.md b/spaces/cihyFjudo/fairness-paper-search/Myob accountright premier v19 crack rar travaillez en ligne ou hors ligne collaborez avec votre quipe et votre comptable.md deleted file mode 100644 index 26ec6a27792dcf4ed23fcd4b1df4b5fe7f3064f5..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Myob accountright premier v19 crack rar travaillez en ligne ou hors ligne collaborez avec votre quipe et votre comptable.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Myob accountright premier v19 crack rar


DOWNLOAD https://tinurli.com/2uwkQP



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/cihyFjudo/fairness-paper-search/Nie opuszczaj mnie. Kazuo Ishiguro. Mobi. Co kryje si za fasad idyllicznego miejsca?.md b/spaces/cihyFjudo/fairness-paper-search/Nie opuszczaj mnie. Kazuo Ishiguro. Mobi. Co kryje si za fasad idyllicznego miejsca?.md deleted file mode 100644 index 02a7b63389503262bfa5457e46929f2f7608f62b..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Nie opuszczaj mnie. Kazuo Ishiguro. Mobi. Co kryje si za fasad idyllicznego miejsca?.md +++ /dev/null @@ -1,5 +0,0 @@ -
      -

      Kazuo Ishiguro urodzi\\u0142 si\\u0119 w 1954 roku w Nagasaki. To brytyjski pisarz japo\\u0144skiego pochodzenia. Jest autorem siedmiu powie\\u015bci i jednego zbioru opowiada\\u0144. Pisze tak\\u017ce scenariusze, kr\\u00f3tkie opowiadania dla prasy i teksty piosenek. Do jego najwa\\u017cniejszych dzie\\u0142 nale\\u017cy zaliczy\\u0107 takie ksi\\u0105\\u017cki jak \\u201eMalarz \\u015bwiata u\\u0142udy\\u201d, \\u201eOkruchy dnia\\u201d i \\u201ePejza\\u017c w kolorze sepii\\u201d. W 2017 roku Akademia Szwedzka po raz kolejny zgotowa\\u0142a niespodziank\\u0119 wszystkim czytelnikom \\u015bledz\\u0105cym werdykty noblowskie. Laureatem Nagrody Nobla w dziedzinie literatury zosta\\u0142 bowiem Kazuo Ishiguro, cho\\u0107 wszyscy spodziewali si\\u0119, \\u017ce je\\u015bli nagroda trafi do autora zwi\\u0105zanego z japo\\u0144skim kr\\u0119giem kulturowym, b\\u0119dzie to Haruki Murakami. Ishiguro otrzyma\\u0142 Nobla, jako ten, \\u201ekt\\u00f3ry w powie\\u015bciach o pot\\u0119\\u017cnej sile emocjonalnej ods\\u0142oni\\u0142 otch\\u0142a\\u0144 pod naszym iluzorycznym poczuciem \\u0142\\u0105czno\\u015bci ze \\u015bwiatem\\u201d.\\r\\n\\r\\nRodzina Kazuo Ishiguro przeprowadzi\\u0142a si\\u0119 do Wielkiej Brytanii, gdy ch\\u0142opiec mia\\u0142 pi\\u0119\\u0107 lat. Cho\\u0107 pierwsze lata \\u017cycia sp\\u0119dzi\\u0142 w Japonii, to zdecydowanie najwi\\u0119kszy wp\\u0142yw na jego m\\u0142odo\\u015b\\u0107 mia\\u0142o przebywanie w \\u015brodowisku brytyjskim. Ishiguro wielokrotnie opowiada\\u0142 o znaczeniu emigracyjnego \\u017cycia w jego tw\\u00f3rczo\\u015bci. Z jednej strony widzia\\u0142 \\u017cycie w Wielkiej Brytanii z innej perspektywy ni\\u017c jego r\\u00f3wie\\u015bnicy, z drugiej za\\u015b tworzy\\u0142 si\\u0119 w jego g\\u0142owie wyimaginowany obraz Japonii, kt\\u00f3ry po wizycie w ojczy\\u017anie musia\\u0142 zrewidowa\\u0107. Do dzi\\u015b pisarz mieszka i tworzy w Londynie. \\r\\n\\r\\nIshiguro ucz\\u0119szcza\\u0142 do Stoughton Primary School, a nast\\u0119pnie Woking County Grammar School w Surrey, w miejscowo\\u015bci, w kt\\u00f3rej pracowa\\u0142 jego ojciec. Po zako\\u0144czeniu nauki w szkole, w 1973 roku sp\\u0119dzi\\u0142 tzw. \\u201egap year\\u201d podr\\u00f3\\u017cuj\\u0105c po Stanach Zjednoczonych i Kanadzie. W trakcie wyprawy nagrywa\\u0142 piosenki i wysy\\u0142a\\u0142 je do r\\u00f3\\u017cnych wytw\\u00f3rni p\\u0142ytowych. W 1974 Ishiguro rozpocz\\u0105\\u0142 studia na Uniwersytecie Kent w Canterbury. Szko\\u0142\\u0119 uko\\u0144czy\\u0142 w 1978 roku z tytu\\u0142em Bachelor of Arts z j\\u0119zyka angielskiego i filozofii. P\\u00f3\\u017aniej studiowa\\u0142 na Uniwersytecie Wschodniej Anglii. Jego praca dyplomowa z kreatywnego pisania sta\\u0142a si\\u0119 podstaw\\u0105 pierwszej ksi\\u0105\\u017cki.\\r\\n\\r\\nTw\\u00f3rczo\\u015b\\u0107 Kazuo Ishiguro\\r\\n\\r\\nIshiguro debiutowa\\u0142 w 1982 roku powie\\u015bci\\u0105 \\u201ePejza\\u017c w kolorze sepii\\u201d. Bohaterk\\u0105 ksi\\u0105\\u017cki jest Etsuko, japo\\u0144ska wdowa mieszkaj\\u0105ca w Wielkiej Brytanii. Kobieta ma silne poczucie winy w zwi\\u0105zku z samob\\u00f3jcz\\u0105 \\u015bmierci\\u0105 c\\u00f3rki. Nie potrafi poradzi\\u0107 sobie z traum\\u0105 po utracie dziecka, a ponadto trapi\\u0105 j\\u0105 sny, w kt\\u00f3rych wracaj\\u0105 wspomnienia z tragicznej wojny w Japonii. 
W swojej powie\\u015bci Ishiguro wci\\u0105ga czytelnik\\u00f3w w pewnego rodzaju gr\\u0119, w kt\\u00f3rej przesz\\u0142o\\u015b\\u0107 zaczyna zlewa\\u0107 si\\u0119 z tera\\u017aniejszo\\u015bci\\u0105. \\r\\n\\r\\nW 1986 roku ukaza\\u0142a si\\u0119 druga powie\\u015b\\u0107 Ishiguro pt. \\u201eMalarz \\u015bwiata u\\u0142udy\\u201d, jedna z ciekawszych ksi\\u0105\\u017cek w jego dorobku. Ishiguro przedstawi\\u0142 w niej posta\\u0107 ekscentrycznego malarza, kt\\u00f3ry prowadzi zupe\\u0142nie zwyk\\u0142e \\u017cycie. Pewnego dnia artysta wpada na nietuzinkowy pomys\\u0142. Wytacza sam sobie proces, w kt\\u00f3rym wciela si\\u0119 w rol\\u0119 oskar\\u017cyciela, oskar\\u017conego i s\\u0119dziego. Czytelnik ma szans\\u0119 odkrywa\\u0107 histori\\u0119 niechronologicznie, na podstawie opis\\u00f3w fragment\\u00f3w \\u017cycia malarza Masuji Ono. W tle po raz kolejny pojawia si\\u0119 temat II wojny \\u015bwiatowej i tragicznych w skutkach bombardowa\\u0144 przeprowadzonych przez ameryka\\u0144skie wojsko.\\r\\n\\r\\nJedna z najbardziej znanych ksi\\u0105\\u017cek Ishiguro \\u2013 \\u201eOkruchy dnia\\u201d, mia\\u0142a swoj\\u0105 premier\\u0119 w 1989 roku. To najg\\u0142o\\u015bniejsza do tej pory powie\\u015b\\u0107 autora, kt\\u00f3ra znalaz\\u0142a si\\u0119 na li\\u015bcie \\u201c100 ksi\\u0105\\u017cek, kt\\u00f3re trzeba przeczyta\\u0107\\u201d stworzonej przez BBC. Pisarz otrzyma\\u0142 za ni\\u0105 Nagrod\\u0119 Bookera. Popularno\\u015bci przysporzy\\u0142a ksi\\u0105\\u017cce r\\u00f3wnie\\u017c ekranizacja z Anthonym Hopkinsem w roli g\\u0142\\u00f3wnej. Bohaterem ksi\\u0105\\u017cki jest Stevens, kamerdyner, kt\\u00f3ry wi\\u0119kszo\\u015b\\u0107 \\u017cycia sp\\u0119dzi\\u0142 s\\u0142u\\u017c\\u0105c lordowi Darlingtonowi. Nawet po \\u015bmierci gospodarza Stevens zajmuje si\\u0119 posiad\\u0142o\\u015bci\\u0105. Pewnego dnia bohater postanawia wyruszy\\u0107 do Francji by nam\\u00f3wi\\u0107 poprzedni\\u0105 gospodyni\\u0119 do powrotu. W trakcie wyprawy u\\u015bwiadamia sobie, \\u017ce wi\\u0119kszo\\u015b\\u0107 \\u017cycia ma ju\\u017c za sob\\u0105 i nie uda mu si\\u0119 cofn\\u0105\\u0107 czasu.\\r\\n\\r\\nSze\\u015b\\u0107 lat p\\u00f3\\u017aniej ukaza\\u0142a si\\u0119 kolejna powie\\u015b\\u0107 autora \\u201eNiepocieszony\\u201d, w kt\\u00f3rej Ishiguro przedstawia losy znanego muzyka, kt\\u00f3ry trafia do bli\\u017cej nieokre\\u015blonego miasta w centrum Europy by da\\u0107 koncert. Fabu\\u0142a zaw\\u0119\\u017ca si\\u0119 do trzech dni, w trakcie kt\\u00f3rych pianista ci\\u0105gle proszony jest o spe\\u0142nianie przys\\u0142ug. Szybko u\\u015bwiadamia sobie, \\u017ce nie da rady sprosta\\u0107 wszystkim oczekiwaniom.\\r\\n\\r\\nW 2000 roku swoj\\u0105 premier\\u0119 mia\\u0142a ksi\\u0105\\u017cka \\u201eKiedy byli\\u015bmy sierotami\\u201d stylizowana na krymina\\u0142 noir. Fabu\\u0142a powie\\u015bci rozgrywa si\\u0119 w latach 30. XX wieku. Znany brytyjski detektyw wyrusza w podr\\u00f3\\u017c do Szanghaju by odnale\\u017a\\u0107 rodzic\\u00f3w, kt\\u00f3rzy zagin\\u0119li w nieznanych okoliczno\\u015bciach, gdy ten by\\u0142 ma\\u0142ym ch\\u0142opcem. Christopher Banks coraz g\\u0142\\u0119biej zanurza si\\u0119 w \\u015bwiat miasta, kt\\u00f3re chowa przed nim wi\\u0119cej ni\\u017c przypuszcza\\u0142.\\r\\n\\r\\nPi\\u0119\\u0107 lat p\\u00f3\\u017aniej ukaza\\u0142a si\\u0119 powie\\u015b\\u0107 \\u201eNie opuszczaj mnie\\u201d. Tym razem pisarz sprezentowa\\u0142 czytelnikom ksi\\u0105\\u017ck\\u0119 inspirowan\\u0105 science-fiction. 
W powie\\u015bci Ishiguro przedstawi\\u0142 uczni\\u00f3w elitarnej szko\\u0142y, kt\\u00f3rych \\u017cycie nie r\\u00f3\\u017cni si\\u0119 specjalnie od \\u017cycia innych. Bohaterowie prze\\u017cywaj\\u0105 normalne emocje, nie maj\\u0105c \\u015bwiadomo\\u015bci, \\u017ce s\\u0105 jedynie nosicielami narz\\u0105d\\u00f3w - klonami stworzonymi na potrzeby transplantacji. \\r\\n\\r\\nPo publikacji \\u201eNie opuszczaj mnie\\u201d, Ishiguro zaserwowa\\u0142 czytelnikom najd\\u0142u\\u017csz\\u0105 przerw\\u0119 w dotychczasowej karierze. Niemal dziesi\\u0119\\u0107 lat up\\u0142yn\\u0119\\u0142o do wydania powie\\u015bci \\u201ePogrzebany olbrzym\\u201d, kolejnego zwrotu w karierze pisarza, tym razem w stron\\u0119 fantastyki. Ksi\\u0105\\u017cka przedstawia \\u015bredniowieczny \\u015bwiat, w czasach nast\\u0119puj\\u0105cych zaraz po latach w\\u0142adania kr\\u00f3la Artura, nad kt\\u00f3rym kr\\u0105\\u017cy widmo smoczycy Querig. Dw\\u00f3jka g\\u0142\\u00f3wnych bohater\\u00f3w wyrusza w drog\\u0119 przez kraj Bryt\\u00f3w by odszuka\\u0107 syna. W mi\\u0119dzyczasie trafiaj\\u0105 do wioski zamieszkanej przez ogry, spotykaj\\u0105 ostatniego rycerza Okr\\u0105g\\u0142ego Sto\\u0142u, walcz\\u0105 z dziwnymi kreaturami. To swoisty miks klasycznej powie\\u015bci rycerskiej z fantastyk\\u0105.\\r\\n\\r\\nNajnowsza powie\\u015b\\u0107 Kauzo Ishiguro po otrzymaniu Nagrody Nobla\\r\\n\\r\\nW marcu 2021 roku ukaza\\u0142a si\\u0119 pierwsza powie\\u015b\\u0107 Kauzo Ishiguro po otrzymaniu Nagrody Nobla. Autor po raz kolejny zabiera czytelnik\\u00f3w do \\u015bwiata science-fiction. \\u201eKlara i s\\u0142o\\u0144ce\\u201d to \\u015bwiat widziany oczami nietypowej bohaterki. Rzeczywisto\\u015b\\u0107, od kt\\u00f3rej ludzi dzieli zaledwie krok. Klara to Sztuczna Przyjaci\\u00f3\\u0142ka, kt\\u00f3ra posiada bogato rozwini\\u0119te zdolno\\u015bci poznawcze oraz empati\\u0119. Klara czeka na klienta, kt\\u00f3ry zabierze j\\u0105 z p\\u00f3\\u0142ki sklepowej. Ishiguro zadaje pytania \\u2013 co czyni cz\\u0142owieka wyj\\u0105tkowym i jak wygl\\u0105da\\u0142by \\u015bwiat, w kt\\u00f3rym maszyny odczuwa\\u0142yby emocje?\\r\\n\\r\\nPonadto Ishiguro ma na swoim koncie zbi\\u00f3r opowiada\\u0144 pt. \\u201eNokturny\\u201d, kt\\u00f3ry ukaza\\u0142 si\\u0119 w 2009 roku. Zar\\u00f3wno \\u201eNokturny\\u201d, jak i inne powie\\u015bci \\u2013 \\u201eNie opuszczaj mnie\\u201d, \\u201ePogrzebany olbrzym\\u201d, \\u201eKiedy byli\\u015bmy sierotami\\u201d, \\u201eNiepocieszony\\u201d, \\u201eOkruchy dnia\\u201d, \\u201eMalarz \\u015bwiata u\\u0142udy\\u201d, \\u201ePejza\\u017c w kolorze sepii\\u201d, \\u201eKlara i S\\u0142o\\u0144ce\\u201d znajdziecie w formie ebooka i papierowej w ksi\\u0119garni internetowej Woblink. Ponadto wybrane ksi\\u0105\\u017cki Kazuo Ishiguro dost\\u0119pne s\\u0105 w formie audiobooka.\",\"httpDescription\":\"Kazuo Ishiguro ebooki, audiobooki, ksi\\u0105\\u017cki. Przegl\\u0105daj w \\u0142atwy spos\\u00f3b tytu\\u0142y autora. Filtruj po cenie, kategorii, wydawcy lub formacie. 

      -

      Nie Opuszczaj Mnie Kazuo Ishiguro Mobi


      Download File >>> https://tinurli.com/2uwhRi



      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Sex Addict Skyrim Mod.md b/spaces/cihyFjudo/fairness-paper-search/Sex Addict Skyrim Mod.md deleted file mode 100644 index 60aef9881b7ffa44f0e813894adeaab01c408c18..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Sex Addict Skyrim Mod.md +++ /dev/null @@ -1,5 +0,0 @@ -
      -

      Desktop nude patch: Adrienn
      Set: Sex addict
      Stripper: Skirts, Shaved
      Hair: BLOND Race: European Age: 22
      Country: HUNGARY City: Budapest
      Vital: 31/23/34 Height: 5.70 Weight: 110

      -

      Sex Addict Skyrim Mod


      Download Ziphttps://tinurli.com/2uwiMt



      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/The Dark Knight 2008 Brrip 720p Experience the Thrilling Action and Drama of the Batman Saga.md b/spaces/cihyFjudo/fairness-paper-search/The Dark Knight 2008 Brrip 720p Experience the Thrilling Action and Drama of the Batman Saga.md deleted file mode 100644 index a4a34762892bc84be27fb9d929a9594e1068d8f9..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/The Dark Knight 2008 Brrip 720p Experience the Thrilling Action and Drama of the Batman Saga.md +++ /dev/null @@ -1,6 +0,0 @@ -

      TELECHARGERMTNCREDITCRACKERVERSION20


      DOWNLOAD >>>>> https://tinurli.com/2uwj3B



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/codeparrot/code-generation-models/datasets/incoder.md b/spaces/codeparrot/code-generation-models/datasets/incoder.md deleted file mode 100644 index df6df26e47e55f8fa15234659514f5a03f376715..0000000000000000000000000000000000000000 --- a/spaces/codeparrot/code-generation-models/datasets/incoder.md +++ /dev/null @@ -1,14 +0,0 @@ -[InCoder](https://huggingface.co/facebook/incoder-6B) is a code generation model that also allows code editing via [infilling](https://arxiv.org/pdf/2204.05999.pdf). It was trained on **216 GB** of preprocessed data from GitHub and Stack Overflow from 28 programming languages. 52 GB is in Python, 107GB in other programming languages and 57GB is content from Stackoverflow that isn't code. - -The GitHub data was cleaned with the following steps: -- Average line length < 100 tokens -- Maximum line length < 3000 MB -- Alphanumeric characters fraction > 0.4 -- Remove auto-generated files (keyword search) - -The second component of the data consists of questions, answers, and comments from Stack Overflow. It includes: -- all questions that have at least one answer -- up to ten answers with a non-negative score (sorted by score) per question -- up to five comments per question/answer - -Exact match deduplication was performed on code files. For more details please refer to this [paper](https://arxiv.org/pdf/2204.05999.pdf). \ No newline at end of file diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bytestream.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bytestream.h deleted file mode 100644 index 67080604b92fafd45d0e083e9268d13c75359c0d..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bytestream.h +++ /dev/null @@ -1,380 +0,0 @@ -/* - * Bytestream functions - * copyright (c) 2006 Baptiste Coudurier - * Copyright (c) 2012 Aneesh Dogra (lionaneesh) - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
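The GitHub-side cleaning described in the incoder.md notes above amounts to a few per-file heuristics plus exact-match deduplication. Here is a minimal sketch of such a filter, assuming character counts as a stand-in for token counts, a made-up AUTOGEN_KEYWORDS list for the "auto-generated files" keyword search, and reading the "Maximum line length < 3000 MB" bullet as a 3000-character limit (the unit there looks like a typo); this is not InCoder's actual preprocessing code:

```python
import hashlib

# Assumed keyword list; the real pipeline's keyword search is not specified here.
AUTOGEN_KEYWORDS = ("auto-generated", "autogenerated", "automatically generated")

def keep_file(source: str) -> bool:
    """Apply the line-length, alphanumeric-fraction and auto-generation filters."""
    lines = source.splitlines()
    if not lines:
        return False
    # Average and maximum line length, measured in characters as an approximation of tokens.
    avg_len = sum(len(line) for line in lines) / len(lines)
    max_len = max(len(line) for line in lines)
    if avg_len >= 100 or max_len >= 3000:
        return False
    # Fraction of alphanumeric characters over the whole file.
    alnum_fraction = sum(ch.isalnum() for ch in source) / len(source)
    if alnum_fraction <= 0.4:
        return False
    # Drop files whose header looks auto-generated (simple keyword search).
    header = "\n".join(lines[:10]).lower()
    return not any(keyword in header for keyword in AUTOGEN_KEYWORDS)

def dedupe_exact(files: dict) -> dict:
    """Exact-match deduplication: keep one file per unique content hash."""
    seen, kept = set(), {}
    for path, source in files.items():
        digest = hashlib.sha256(source.encode("utf-8")).hexdigest()
        if digest not in seen:
            seen.add(digest)
            kept[path] = source
    return kept
```

The Stack Overflow side (answer and comment limits) would be handled separately; the sketch only mirrors the GitHub bullet points above.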
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_BYTESTREAM_H -#define AVCODEC_BYTESTREAM_H - -#include -#include - -#include "libavutil/avassert.h" -#include "libavutil/common.h" -#include "libavutil/intreadwrite.h" - -typedef struct GetByteContext { - const uint8_t *buffer, *buffer_end, *buffer_start; -} GetByteContext; - -typedef struct PutByteContext { - uint8_t *buffer, *buffer_end, *buffer_start; - int eof; -} PutByteContext; - -#define DEF(type, name, bytes, read, write) \ -static av_always_inline type bytestream_get_ ## name(const uint8_t **b) \ -{ \ - (*b) += bytes; \ - return read(*b - bytes); \ -} \ -static av_always_inline void bytestream_put_ ## name(uint8_t **b, \ - const type value) \ -{ \ - write(*b, value); \ - (*b) += bytes; \ -} \ -static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p, \ - const type value) \ -{ \ - bytestream_put_ ## name(&p->buffer, value); \ -} \ -static av_always_inline void bytestream2_put_ ## name(PutByteContext *p, \ - const type value) \ -{ \ - if (!p->eof && (p->buffer_end - p->buffer >= bytes)) { \ - write(p->buffer, value); \ - p->buffer += bytes; \ - } else \ - p->eof = 1; \ -} \ -static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \ -{ \ - return bytestream_get_ ## name(&g->buffer); \ -} \ -static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \ -{ \ - if (g->buffer_end - g->buffer < bytes) { \ - g->buffer = g->buffer_end; \ - return 0; \ - } \ - return bytestream2_get_ ## name ## u(g); \ -} \ -static av_always_inline type bytestream2_peek_ ## name ## u(GetByteContext *g) \ -{ \ - return read(g->buffer); \ -} \ -static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \ -{ \ - if (g->buffer_end - g->buffer < bytes) \ - return 0; \ - return bytestream2_peek_ ## name ## u(g); \ -} - -DEF(uint64_t, le64, 8, AV_RL64, AV_WL64) -DEF(unsigned int, le32, 4, AV_RL32, AV_WL32) -DEF(unsigned int, le24, 3, AV_RL24, AV_WL24) -DEF(unsigned int, le16, 2, AV_RL16, AV_WL16) -DEF(uint64_t, be64, 8, AV_RB64, AV_WB64) -DEF(unsigned int, be32, 4, AV_RB32, AV_WB32) -DEF(unsigned int, be24, 3, AV_RB24, AV_WB24) -DEF(unsigned int, be16, 2, AV_RB16, AV_WB16) -DEF(unsigned int, byte, 1, AV_RB8 , AV_WB8) - -#if AV_HAVE_BIGENDIAN -# define bytestream2_get_ne16 bytestream2_get_be16 -# define bytestream2_get_ne24 bytestream2_get_be24 -# define bytestream2_get_ne32 bytestream2_get_be32 -# define bytestream2_get_ne64 bytestream2_get_be64 -# define bytestream2_get_ne16u bytestream2_get_be16u -# define bytestream2_get_ne24u bytestream2_get_be24u -# define bytestream2_get_ne32u bytestream2_get_be32u -# define bytestream2_get_ne64u bytestream2_get_be64u -# define bytestream2_put_ne16 bytestream2_put_be16 -# define bytestream2_put_ne24 bytestream2_put_be24 -# define bytestream2_put_ne32 bytestream2_put_be32 -# define bytestream2_put_ne64 bytestream2_put_be64 -# define bytestream2_peek_ne16 bytestream2_peek_be16 -# define bytestream2_peek_ne24 bytestream2_peek_be24 -# define bytestream2_peek_ne32 bytestream2_peek_be32 -# define bytestream2_peek_ne64 bytestream2_peek_be64 -#else -# define bytestream2_get_ne16 bytestream2_get_le16 -# define bytestream2_get_ne24 bytestream2_get_le24 -# define bytestream2_get_ne32 bytestream2_get_le32 -# define bytestream2_get_ne64 bytestream2_get_le64 -# define 
bytestream2_get_ne16u bytestream2_get_le16u -# define bytestream2_get_ne24u bytestream2_get_le24u -# define bytestream2_get_ne32u bytestream2_get_le32u -# define bytestream2_get_ne64u bytestream2_get_le64u -# define bytestream2_put_ne16 bytestream2_put_le16 -# define bytestream2_put_ne24 bytestream2_put_le24 -# define bytestream2_put_ne32 bytestream2_put_le32 -# define bytestream2_put_ne64 bytestream2_put_le64 -# define bytestream2_peek_ne16 bytestream2_peek_le16 -# define bytestream2_peek_ne24 bytestream2_peek_le24 -# define bytestream2_peek_ne32 bytestream2_peek_le32 -# define bytestream2_peek_ne64 bytestream2_peek_le64 -#endif - -static av_always_inline void bytestream2_init(GetByteContext *g, - const uint8_t *buf, - int buf_size) -{ - av_assert0(buf_size >= 0); - g->buffer = buf; - g->buffer_start = buf; - g->buffer_end = buf + buf_size; -} - -static av_always_inline void bytestream2_init_writer(PutByteContext *p, - uint8_t *buf, - int buf_size) -{ - av_assert0(buf_size >= 0); - p->buffer = buf; - p->buffer_start = buf; - p->buffer_end = buf + buf_size; - p->eof = 0; -} - -static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g) -{ - return g->buffer_end - g->buffer; -} - -static av_always_inline int bytestream2_get_bytes_left_p(PutByteContext *p) -{ - return p->buffer_end - p->buffer; -} - -static av_always_inline void bytestream2_skip(GetByteContext *g, - unsigned int size) -{ - g->buffer += FFMIN(g->buffer_end - g->buffer, size); -} - -static av_always_inline void bytestream2_skipu(GetByteContext *g, - unsigned int size) -{ - g->buffer += size; -} - -static av_always_inline void bytestream2_skip_p(PutByteContext *p, - unsigned int size) -{ - unsigned int size2; - if (p->eof) - return; - size2 = FFMIN(p->buffer_end - p->buffer, size); - if (size2 != size) - p->eof = 1; - p->buffer += size2; -} - -static av_always_inline int bytestream2_tell(GetByteContext *g) -{ - return (int)(g->buffer - g->buffer_start); -} - -static av_always_inline int bytestream2_tell_p(PutByteContext *p) -{ - return (int)(p->buffer - p->buffer_start); -} - -static av_always_inline int bytestream2_size(GetByteContext *g) -{ - return (int)(g->buffer_end - g->buffer_start); -} - -static av_always_inline int bytestream2_size_p(PutByteContext *p) -{ - return (int)(p->buffer_end - p->buffer_start); -} - -static av_always_inline int bytestream2_seek(GetByteContext *g, - int offset, - int whence) -{ - switch (whence) { - case SEEK_CUR: - offset = av_clip(offset, -(g->buffer - g->buffer_start), - g->buffer_end - g->buffer); - g->buffer += offset; - break; - case SEEK_END: - offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0); - g->buffer = g->buffer_end + offset; - break; - case SEEK_SET: - offset = av_clip(offset, 0, g->buffer_end - g->buffer_start); - g->buffer = g->buffer_start + offset; - break; - default: - return AVERROR(EINVAL); - } - return bytestream2_tell(g); -} - -static av_always_inline int bytestream2_seek_p(PutByteContext *p, - int offset, - int whence) -{ - p->eof = 0; - switch (whence) { - case SEEK_CUR: - if (p->buffer_end - p->buffer < offset) - p->eof = 1; - offset = av_clip(offset, -(p->buffer - p->buffer_start), - p->buffer_end - p->buffer); - p->buffer += offset; - break; - case SEEK_END: - if (offset > 0) - p->eof = 1; - offset = av_clip(offset, -(p->buffer_end - p->buffer_start), 0); - p->buffer = p->buffer_end + offset; - break; - case SEEK_SET: - if (p->buffer_end - p->buffer_start < offset) - p->eof = 1; - offset = av_clip(offset, 0, p->buffer_end - 
p->buffer_start); - p->buffer = p->buffer_start + offset; - break; - default: - return AVERROR(EINVAL); - } - return bytestream2_tell_p(p); -} - -static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, - uint8_t *dst, - unsigned int size) -{ - unsigned int size2 = FFMIN(g->buffer_end - g->buffer, size); - memcpy(dst, g->buffer, size2); - g->buffer += size2; - return size2; -} - -static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, - uint8_t *dst, - unsigned int size) -{ - memcpy(dst, g->buffer, size); - g->buffer += size; - return size; -} - -static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, - const uint8_t *src, - unsigned int size) -{ - unsigned int size2; - if (p->eof) - return 0; - size2 = FFMIN(p->buffer_end - p->buffer, size); - if (size2 != size) - p->eof = 1; - memcpy(p->buffer, src, size2); - p->buffer += size2; - return size2; -} - -static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p, - const uint8_t *src, - unsigned int size) -{ - memcpy(p->buffer, src, size); - p->buffer += size; - return size; -} - -static av_always_inline void bytestream2_set_buffer(PutByteContext *p, - const uint8_t c, - unsigned int size) -{ - unsigned int size2; - if (p->eof) - return; - size2 = FFMIN(p->buffer_end - p->buffer, size); - if (size2 != size) - p->eof = 1; - memset(p->buffer, c, size2); - p->buffer += size2; -} - -static av_always_inline void bytestream2_set_bufferu(PutByteContext *p, - const uint8_t c, - unsigned int size) -{ - memset(p->buffer, c, size); - p->buffer += size; -} - -static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p) -{ - return p->eof; -} - -static av_always_inline unsigned int bytestream2_copy_bufferu(PutByteContext *p, - GetByteContext *g, - unsigned int size) -{ - memcpy(p->buffer, g->buffer, size); - p->buffer += size; - g->buffer += size; - return size; -} - -static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p, - GetByteContext *g, - unsigned int size) -{ - unsigned int size2; - - if (p->eof) - return 0; - size = FFMIN(g->buffer_end - g->buffer, size); - size2 = FFMIN(p->buffer_end - p->buffer, size); - if (size2 != size) - p->eof = 1; - - return bytestream2_copy_bufferu(p, g, size2); -} - -static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, - uint8_t *dst, - unsigned int size) -{ - memcpy(dst, *b, size); - (*b) += size; - return size; -} - -static av_always_inline void bytestream_put_buffer(uint8_t **b, - const uint8_t *src, - unsigned int size) -{ - memcpy(*b, src, size); - (*b) += size; -} - -#endif /* AVCODEC_BYTESTREAM_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/vp9_mc_msa.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/vp9_mc_msa.c deleted file mode 100644 index 57ea425727d5c5ace20c06797c4608f33537e670..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/vp9_mc_msa.c +++ /dev/null @@ -1,4446 +0,0 @@ -/* - * Copyright (c) 2015 - 2017 Shivraj Patil (Shivraj.Patil@imgtec.com) - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavcodec/vp9dsp.h" -#include "libavutil/mips/generic_macros_msa.h" -#include "vp9dsp_mips.h" - -static const uint8_t mc_filt_mask_arr[16 * 3] = { - /* 8 width cases */ - 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, - /* 4 width cases */ - 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20, - /* 4 width cases */ - 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28 -}; - -static const int8_t vp9_bilinear_filters_msa[15][2] = { - {120, 8}, - {112, 16}, - {104, 24}, - {96, 32}, - {88, 40}, - {80, 48}, - {72, 56}, - {64, 64}, - {56, 72}, - {48, 80}, - {40, 88}, - {32, 96}, - {24, 104}, - {16, 112}, - {8, 120} -}; - -#define FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, \ - filt0, filt1, filt2, filt3) \ -( { \ - v8i16 tmp0, tmp1; \ - \ - tmp0 = __msa_dotp_s_h((v16i8) vec0, (v16i8) filt0); \ - tmp0 = __msa_dpadd_s_h(tmp0, (v16i8) vec1, (v16i8) filt1); \ - tmp1 = __msa_dotp_s_h((v16i8) vec2, (v16i8) filt2); \ - tmp1 = __msa_dpadd_s_h(tmp1, (v16i8) vec3, (v16i8) filt3); \ - tmp0 = __msa_adds_s_h(tmp0, tmp1); \ - \ - tmp0; \ -} ) - -#define HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3, \ - filt_h0, filt_h1, filt_h2, filt_h3) \ -( { \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \ - v8i16 hz_out_m; \ - \ - VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3, \ - vec0_m, vec1_m, vec2_m, vec3_m); \ - hz_out_m = FILT_8TAP_DPADD_S_H(vec0_m, vec1_m, vec2_m, vec3_m, \ - filt_h0, filt_h1, filt_h2, filt_h3); \ - \ - hz_out_m = __msa_srari_h(hz_out_m, 7); \ - hz_out_m = __msa_sat_s_h(hz_out_m, 7); \ - \ - hz_out_m; \ -} ) - -#define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \ - mask0, mask1, mask2, mask3, \ - filt0, filt1, filt2, filt3, \ - out0, out1) \ -{ \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ - v8i16 res0_m, res1_m, res2_m, res3_m; \ - \ - VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \ - DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, res0_m, res1_m); \ - VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \ - DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, res0_m, res1_m); \ - VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \ - DOTP_SB2_SH(vec4_m, vec5_m, filt2, filt2, res2_m, res3_m); \ - VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m); \ - DPADD_SB2_SH(vec6_m, vec7_m, filt3, filt3, res2_m, res3_m); \ - ADDS_SH2_SH(res0_m, res2_m, res1_m, res3_m, out0, out1); \ -} - -#define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \ - mask0, mask1, mask2, mask3, \ - filt0, filt1, filt2, filt3, \ - out0, out1, out2, out3) \ -{ \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ - v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m; \ - \ - VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \ - DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \ - res0_m, res1_m, res2_m, res3_m); \ - VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0_m, vec1_m); \ - 
VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m); \ - DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt2, filt2, filt2, filt2, \ - res4_m, res5_m, res6_m, res7_m); \ - VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec4_m, vec5_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m); \ - DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt1, filt1, filt1, filt1, \ - res0_m, res1_m, res2_m, res3_m); \ - VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec4_m, vec5_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m); \ - DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt3, filt3, filt3, filt3, \ - res4_m, res5_m, res6_m, res7_m); \ - ADDS_SH4_SH(res0_m, res4_m, res1_m, res5_m, res2_m, res6_m, res3_m, \ - res7_m, out0, out1, out2, out3); \ -} - -#define PCKEV_XORI128_AVG_ST_UB(in0, in1, dst, pdst) \ -{ \ - v16u8 tmp_m; \ - \ - tmp_m = PCKEV_XORI128_UB(in1, in0); \ - tmp_m = __msa_aver_u_b(tmp_m, (v16u8) dst); \ - ST_UB(tmp_m, (pdst)); \ -} - -#define PCKEV_AVG_ST_UB(in0, in1, dst, pdst) \ -{ \ - v16u8 tmp_m; \ - \ - tmp_m = (v16u8) __msa_pckev_b((v16i8) in0, (v16i8) in1); \ - tmp_m = __msa_aver_u_b(tmp_m, (v16u8) dst); \ - ST_UB(tmp_m, (pdst)); \ -} - -#define PCKEV_AVG_ST8x4_UB(in0, in1, in2, in3, dst0, dst1, \ - pdst, stride) \ -{ \ - v16u8 tmp0_m, tmp1_m; \ - uint8_t *pdst_m = (uint8_t *) (pdst); \ - \ - PCKEV_B2_UB(in1, in0, in3, in2, tmp0_m, tmp1_m); \ - AVER_UB2_UB(tmp0_m, dst0, tmp1_m, dst1, tmp0_m, tmp1_m); \ - ST_D4(tmp0_m, tmp1_m, 0, 1, 0, 1, pdst_m, stride); \ -} - -static void common_hz_8t_4x4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - v16u8 mask0, mask1, mask2, mask3, out; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v8i16 filt, out0, out1; - - mask0 = LD_UB(&mc_filt_mask_arr[16]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, out0, out1); - SRARI_H2_SH(out0, out1, 7); - SAT_SH2_SH(out0, out1, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST_W4(out, 0, 1, 2, 3, dst, dst_stride); -} - -static void common_hz_8t_4x8_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 filt0, filt1, filt2, filt3; - v16i8 src0, src1, src2, src3; - v16u8 mask0, mask1, mask2, mask3, out; - v8i16 filt, out0, out1, out2, out3; - - mask0 = LD_UB(&mc_filt_mask_arr[16]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - src += (4 * src_stride); - HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, out0, out1); - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST_W4(out, 0, 1, 2, 3, dst, 
dst_stride); - out = PCKEV_XORI128_UB(out2, out3); - ST_W4(out, 0, 1, 2, 3, dst + 4 * dst_stride, dst_stride); -} - -static void common_hz_8t_4w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - if (4 == height) { - common_hz_8t_4x4_msa(src, src_stride, dst, dst_stride, filter); - } else if (8 == height) { - common_hz_8t_4x8_msa(src, src_stride, dst, dst_stride, filter); - } -} - -static void common_hz_8t_8x4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 mask0, mask1, mask2, mask3, tmp0, tmp1; - v8i16 filt, out0, out1, out2, out3; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, out0, out1, - out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - tmp0 = PCKEV_XORI128_UB(out0, out1); - tmp1 = PCKEV_XORI128_UB(out2, out3); - ST_D4(tmp0, tmp1, 0, 1, 0, 1, dst, dst_stride); -} - -static void common_hz_8t_8x8mult_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 mask0, mask1, mask2, mask3, tmp0, tmp1; - v8i16 filt, out0, out1, out2, out3; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - src += (4 * src_stride); - HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, out0, - out1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - tmp0 = PCKEV_XORI128_UB(out0, out1); - tmp1 = PCKEV_XORI128_UB(out2, out3); - ST_D4(tmp0, tmp1, 0, 1, 0, 1, dst, dst_stride); - dst += (4 * dst_stride); - } -} - -static void common_hz_8t_8w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - if (4 == height) { - common_hz_8t_8x4_msa(src, src_stride, dst, dst_stride, filter); - } else { - common_hz_8t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, - height); - } -} - -static void common_hz_8t_16w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 mask0, mask1, mask2, mask3, out; - v8i16 filt, out0, out1, out2, out3; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - for (loop_cnt = (height >> 1); loop_cnt--;) { - LD_SB2(src, src_stride, src0, src2); - LD_SB2(src + 8, src_stride, src1, src3); - 
XORI_B4_128_SB(src0, src1, src2, src3); - src += (2 * src_stride); - HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, out0, - out1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST_UB(out, dst); - dst += dst_stride; - out = PCKEV_XORI128_UB(out2, out3); - ST_UB(out, dst); - dst += dst_stride; - } -} - -static void common_hz_8t_32w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 mask0, mask1, mask2, mask3, out; - v8i16 filt, out0, out1, out2, out3; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - for (loop_cnt = (height >> 1); loop_cnt--;) { - src0 = LD_SB(src); - src2 = LD_SB(src + 16); - src3 = LD_SB(src + 24); - src1 = __msa_sldi_b(src2, src0, 8); - src += src_stride; - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, out0, - out1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - - src0 = LD_SB(src); - src2 = LD_SB(src + 16); - src3 = LD_SB(src + 24); - src1 = __msa_sldi_b(src2, src0, 8); - src += src_stride; - - out = PCKEV_XORI128_UB(out0, out1); - ST_UB(out, dst); - out = PCKEV_XORI128_UB(out2, out3); - ST_UB(out, dst + 16); - dst += dst_stride; - - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, out0, - out1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST_UB(out, dst); - out = PCKEV_XORI128_UB(out2, out3); - ST_UB(out, dst + 16); - dst += dst_stride; - } -} - -static void common_hz_8t_64w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - int32_t loop_cnt; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 mask0, mask1, mask2, mask3, out; - v8i16 filt, out0, out1, out2, out3; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - for (loop_cnt = height; loop_cnt--;) { - src0 = LD_SB(src); - src2 = LD_SB(src + 16); - src3 = LD_SB(src + 24); - src1 = __msa_sldi_b(src2, src0, 8); - - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, - mask2, mask3, filt0, filt1, filt2, filt3, - out0, out1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST_UB(out, dst); - out = PCKEV_XORI128_UB(out2, out3); - ST_UB(out, dst + 16); - - src0 = LD_SB(src + 32); - src2 = LD_SB(src + 48); - src3 = LD_SB(src + 56); - src1 = __msa_sldi_b(src2, src0, 8); - src += src_stride; - - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, - mask2, mask3, filt0, filt1, filt2, filt3, - out0, out1, out2, out3); - 
SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST_UB(out, dst + 32); - out = PCKEV_XORI128_UB(out2, out3); - ST_UB(out, dst + 48); - dst += dst_stride; - } -} - -static void common_vt_8t_4w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; - v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776; - v16i8 src10998, filt0, filt1, filt2, filt3; - v16u8 out; - v8i16 filt, out10, out32; - - src -= (3 * src_stride); - - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); - src += (7 * src_stride); - - ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, - src54_r, src21_r); - ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); - ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src2110, - src4332, src6554); - XORI_B3_128_SB(src2110, src4332, src6554); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src7, src8, src9, src10); - src += (4 * src_stride); - - ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, - src87_r, src98_r, src109_r); - ILVR_D2_SB(src87_r, src76_r, src109_r, src98_r, src8776, src10998); - XORI_B2_128_SB(src8776, src10998); - out10 = FILT_8TAP_DPADD_S_H(src2110, src4332, src6554, src8776, filt0, - filt1, filt2, filt3); - out32 = FILT_8TAP_DPADD_S_H(src4332, src6554, src8776, src10998, filt0, - filt1, filt2, filt3); - SRARI_H2_SH(out10, out32, 7); - SAT_SH2_SH(out10, out32, 7); - out = PCKEV_XORI128_UB(out10, out32); - ST_W4(out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); - - src2110 = src6554; - src4332 = src8776; - src6554 = src10998; - src6 = src10; - } -} - -static void common_vt_8t_8w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; - v16i8 src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3; - v16u8 tmp0, tmp1; - v8i16 filt, out0_r, out1_r, out2_r, out3_r; - - src -= (3 * src_stride); - - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); - XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); - src += (7 * src_stride); - ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, - src54_r, src21_r); - ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src7, src8, src9, src10); - XORI_B4_128_SB(src7, src8, src9, src10); - src += (4 * src_stride); - - ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, - src87_r, src98_r, src109_r); - out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0, - filt1, filt2, filt3); - out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0, - filt1, filt2, filt3); - out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0, - filt1, filt2, filt3); - out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0, - 
filt1, filt2, filt3); - SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 7); - SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); - tmp0 = PCKEV_XORI128_UB(out0_r, out1_r); - tmp1 = PCKEV_XORI128_UB(out2_r, out3_r); - ST_D4(tmp0, tmp1, 0, 1, 0, 1, dst, dst_stride); - dst += (4 * dst_stride); - - src10_r = src54_r; - src32_r = src76_r; - src54_r = src98_r; - src21_r = src65_r; - src43_r = src87_r; - src65_r = src109_r; - src6 = src10; - } -} - -static void common_vt_8t_16w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16i8 filt0, filt1, filt2, filt3; - v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; - v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l; - v16i8 src98_l, src21_l, src43_l, src65_l, src87_l, src109_l; - v16u8 tmp0, tmp1, tmp2, tmp3; - v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l; - - src -= (3 * src_stride); - - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); - XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); - src += (7 * src_stride); - ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, - src54_r, src21_r); - ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); - ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, src32_l, - src54_l, src21_l); - ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src7, src8, src9, src10); - XORI_B4_128_SB(src7, src8, src9, src10); - src += (4 * src_stride); - - ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, - src87_r, src98_r, src109_r); - ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_l, - src87_l, src98_l, src109_l); - out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0, - filt1, filt2, filt3); - out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0, - filt1, filt2, filt3); - out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0, - filt1, filt2, filt3); - out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0, - filt1, filt2, filt3); - out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l, filt0, - filt1, filt2, filt3); - out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l, filt0, - filt1, filt2, filt3); - out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l, filt0, - filt1, filt2, filt3); - out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l, filt0, - filt1, filt2, filt3); - SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 7); - SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, 7); - SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); - SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); - PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, - out3_r, tmp0, tmp1, tmp2, tmp3); - XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); - ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); - dst += (4 * dst_stride); - - src10_r = src54_r; - src32_r = src76_r; - src54_r = src98_r; - src21_r = src65_r; - src43_r = src87_r; - src65_r = src109_r; - src10_l = src54_l; - src32_l = src76_l; - src54_l = src98_l; - src21_l = src65_l; - src43_l = src87_l; - src65_l = src109_l; - src6 = src10; - } -} - -static void common_vt_8t_16w_mult_msa(const uint8_t *src, 
int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height, - int32_t width) -{ - const uint8_t *src_tmp; - uint8_t *dst_tmp; - uint32_t loop_cnt, cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16i8 filt0, filt1, filt2, filt3; - v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; - v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l; - v16i8 src98_l, src21_l, src43_l, src65_l, src87_l, src109_l; - v16u8 tmp0, tmp1, tmp2, tmp3; - v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l; - - src -= (3 * src_stride); - - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - for (cnt = (width >> 4); cnt--;) { - src_tmp = src; - dst_tmp = dst; - - LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6); - XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); - src_tmp += (7 * src_stride); - ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, - src32_r, src54_r, src21_r); - ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); - ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, - src32_l, src54_l, src21_l); - ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src_tmp, src_stride, src7, src8, src9, src10); - XORI_B4_128_SB(src7, src8, src9, src10); - src_tmp += (4 * src_stride); - ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, - src87_r, src98_r, src109_r); - ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_l, - src87_l, src98_l, src109_l); - out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, - filt0, filt1, filt2, filt3); - out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, - filt0, filt1, filt2, filt3); - out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, - filt0, filt1, filt2, filt3); - out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, - filt0, filt1, filt2, filt3); - out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l, - filt0, filt1, filt2, filt3); - out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l, - filt0, filt1, filt2, filt3); - out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l, - filt0, filt1, filt2, filt3); - out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l, - filt0, filt1, filt2, filt3); - SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 7); - SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, 7); - SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); - SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); - PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, - out3_r, tmp0, tmp1, tmp2, tmp3); - XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); - ST_UB4(tmp0, tmp1, tmp2, tmp3, dst_tmp, dst_stride); - dst_tmp += (4 * dst_stride); - - src10_r = src54_r; - src32_r = src76_r; - src54_r = src98_r; - src21_r = src65_r; - src43_r = src87_r; - src65_r = src109_r; - src10_l = src54_l; - src32_l = src76_l; - src54_l = src98_l; - src21_l = src65_l; - src43_l = src87_l; - src65_l = src109_l; - src6 = src10; - } - - src += 16; - dst += 16; - } -} - -static void common_vt_8t_32w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride, filter, height, - 32); -} - -static void common_vt_8t_64w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - 
const int8_t *filter, int32_t height) -{ - common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride, filter, height, - 64); -} - -static void common_hv_8ht_8vt_4w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; - v16u8 mask0, mask1, mask2, mask3, out; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8i16 hz_out7, hz_out8, hz_out9, tmp0, tmp1, out0, out1, out2, out3, out4; - v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3; - - mask0 = LD_UB(&mc_filt_mask_arr[16]); - src -= (3 + 3 * src_stride); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt_hz0, filt_hz1, filt_hz2, filt_hz3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); - XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); - src += (7 * src_stride); - - hz_out0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - SLDI_B2_SH(hz_out2, hz_out0, hz_out4, hz_out2, 8, hz_out1, hz_out3); - - filt = LD_SH(filter_vert); - SPLATI_H4_SH(filt, 0, 1, 2, 3, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - - ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); - out2 = (v8i16) __msa_ilvev_b((v16i8) hz_out5, (v16i8) hz_out4); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src7, src8, src9, src10); - XORI_B4_128_SB(src7, src8, src9, src10); - src += (4 * src_stride); - - hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - hz_out6 = (v8i16) __msa_sldi_b((v16i8) hz_out7, (v16i8) hz_out5, 8); - out3 = (v8i16) __msa_ilvev_b((v16i8) hz_out7, (v16i8) hz_out6); - tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - - hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - hz_out8 = (v8i16) __msa_sldi_b((v16i8) hz_out9, (v16i8) hz_out7, 8); - out4 = (v8i16) __msa_ilvev_b((v16i8) hz_out9, (v16i8) hz_out8); - tmp1 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out4, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - SRARI_H2_SH(tmp0, tmp1, 7); - SAT_SH2_SH(tmp0, tmp1, 7); - out = PCKEV_XORI128_UB(tmp0, tmp1); - ST_W4(out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); - - hz_out5 = hz_out9; - out0 = out2; - out1 = out3; - out2 = out4; - } -} - -static void common_hv_8ht_8vt_8w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; - v16u8 mask0, mask1, mask2, mask3, vec0, vec1; - v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8i16 hz_out7, hz_out8, hz_out9, 
hz_out10, tmp0, tmp1, tmp2, tmp3; - v8i16 out0, out1, out2, out3, out4, out5, out6, out7, out8, out9; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= (3 + 3 * src_stride); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt_hz0, filt_hz1, filt_hz2, filt_hz3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); - src += (7 * src_stride); - - XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); - hz_out0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - - filt = LD_SH(filter_vert); - SPLATI_H4_SH(filt, 0, 1, 2, 3, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - - ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); - ILVEV_B2_SH(hz_out4, hz_out5, hz_out1, hz_out2, out2, out4); - ILVEV_B2_SH(hz_out3, hz_out4, hz_out5, hz_out6, out5, out6); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src7, src8, src9, src10); - src += (4 * src_stride); - - XORI_B4_128_SB(src7, src8, src9, src10); - - hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - out3 = (v8i16) __msa_ilvev_b((v16i8) hz_out7, (v16i8) hz_out6); - tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - - hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - out7 = (v8i16) __msa_ilvev_b((v16i8) hz_out8, (v16i8) hz_out7); - tmp1 = FILT_8TAP_DPADD_S_H(out4, out5, out6, out7, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - - hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - out8 = (v8i16) __msa_ilvev_b((v16i8) hz_out9, (v16i8) hz_out8); - tmp2 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out8, filt_vt0, - filt_vt1, filt_vt2, filt_vt3); - - hz_out10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - out9 = (v8i16) __msa_ilvev_b((v16i8) hz_out10, (v16i8) hz_out9); - tmp3 = FILT_8TAP_DPADD_S_H(out5, out6, out7, out9, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); - vec0 = PCKEV_XORI128_UB(tmp0, tmp1); - vec1 = PCKEV_XORI128_UB(tmp2, tmp3); - ST_D4(vec0, vec1, 0, 1, 0, 1, dst, dst_stride); - dst += (4 * dst_stride); - - hz_out6 = hz_out10; - out0 = out2; - out1 = out3; - out2 = out8; - out4 = out6; - out5 = out7; - out6 = out9; - } -} - -static void common_hv_8ht_8vt_16w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 2; multiple8_cnt--;) { - common_hv_8ht_8vt_8w_msa(src, 
src_stride, dst, dst_stride, filter_horiz, - filter_vert, height); - - src += 8; - dst += 8; - } -} - -static void common_hv_8ht_8vt_32w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 4; multiple8_cnt--;) { - common_hv_8ht_8vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, - filter_vert, height); - - src += 8; - dst += 8; - } -} - -static void common_hv_8ht_8vt_64w_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 8; multiple8_cnt--;) { - common_hv_8ht_8vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, - filter_vert, height); - - src += 8; - dst += 8; - } -} - -static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - uint32_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 dst0, res; - v16u8 mask0, mask1, mask2, mask3; - v8i16 filt, res0, res1; - - mask0 = LD_UB(&mc_filt_mask_arr[16]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, res0, res1); - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - SRARI_H2_SH(res0, res1, 7); - SAT_SH2_SH(res0, res1, 7); - res = PCKEV_XORI128_UB(res0, res1); - res = (v16u8) __msa_aver_u_b(res, dst0); - ST_W4(res, 0, 1, 2, 3, dst, dst_stride); -} - -static void common_hz_8t_and_aver_dst_4x8_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - uint32_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 mask0, mask1, mask2, mask3, res0, res1, res2, res3; - v16u8 dst0, dst1; - v8i16 filt, vec0, vec1, vec2, vec3; - - mask0 = LD_UB(&mc_filt_mask_arr[16]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - src += (4 * src_stride); - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - LW4(dst + 4 * dst_stride, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1); - HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, vec0, vec1); - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, vec2, vec3); - SRARI_H4_SH(vec0, vec1, vec2, vec3, 7); - SAT_SH4_SH(vec0, vec1, vec2, vec3, 7); - PCKEV_B4_UB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, - res0, res1, res2, res3); - ILVR_D2_UB(res1, res0, res3, res2, res0, res2); - XORI_B2_128_UB(res0, res2); - AVER_UB2_UB(res0, dst0, res2, dst1, res0, res2); - ST_W8(res0, res2, 0, 1, 2, 3, 0, 1, 2, 3, 
dst, dst_stride); -} - -static void common_hz_8t_and_aver_dst_4w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - if (4 == height) { - common_hz_8t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, - filter); - } else if (8 == height) { - common_hz_8t_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, - filter); - } -} - -static void common_hz_8t_and_aver_dst_8w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - int32_t loop_cnt; - int64_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 mask0, mask1, mask2, mask3, dst0, dst1; - v8i16 filt, out0, out1, out2, out3; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - src += (4 * src_stride); - HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - mask3, filt0, filt1, filt2, filt3, out0, - out1, out2, out3); - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, - dst, dst_stride); - dst += (4 * dst_stride); - } -} - -static void common_hz_8t_and_aver_dst_16w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - int32_t loop_cnt; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 mask0, mask1, mask2, mask3, dst0, dst1; - v8i16 filt, out0, out1, out2, out3; - v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8i16 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - for (loop_cnt = height >> 1; loop_cnt--;) { - LD_SB2(src, src_stride, src0, src2); - LD_SB2(src + 8, src_stride, src1, src3); - src += (2 * src_stride); - - XORI_B4_128_SB(src0, src1, src2, src3); - VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8, - vec12); - VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, - vec13); - VSHF_B4_SH(src2, src2, mask0, mask1, mask2, mask3, vec2, vec6, vec10, - vec14); - VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11, - vec15); - DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, - vec1, vec2, vec3); - DOTP_SB4_SH(vec8, vec9, vec10, vec11, filt2, filt2, filt2, filt2, vec8, - vec9, vec10, vec11); - DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, - vec1, vec2, vec3); - DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt3, filt3, filt3, filt3, - vec8, vec9, vec10, vec11); - ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, - out1, out2, out3); - LD_UB2(dst, dst_stride, dst0, dst1); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - PCKEV_XORI128_AVG_ST_UB(out1, out0, dst0, dst); - dst += dst_stride; - PCKEV_XORI128_AVG_ST_UB(out3, out2, dst1, 
dst); - dst += dst_stride; - } -} - -static void common_hz_8t_and_aver_dst_32w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 dst1, dst2, mask0, mask1, mask2, mask3; - v8i16 filt, out0, out1, out2, out3; - v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8i16 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - for (loop_cnt = height; loop_cnt--;) { - src0 = LD_SB(src); - src2 = LD_SB(src + 16); - src3 = LD_SB(src + 24); - src1 = __msa_sldi_b(src2, src0, 8); - src += src_stride; - - XORI_B4_128_SB(src0, src1, src2, src3); - VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8, - vec12); - VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, - vec13); - VSHF_B4_SH(src2, src2, mask0, mask1, mask2, mask3, vec2, vec6, vec10, - vec14); - VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11, - vec15); - DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, - vec1, vec2, vec3); - DOTP_SB4_SH(vec8, vec9, vec10, vec11, filt2, filt2, filt2, filt2, vec8, - vec9, vec10, vec11); - DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, - vec1, vec2, vec3); - DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt3, filt3, filt3, filt3, - vec8, vec9, vec10, vec11); - ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, - out1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - LD_UB2(dst, 16, dst1, dst2); - PCKEV_XORI128_AVG_ST_UB(out1, out0, dst1, dst); - PCKEV_XORI128_AVG_ST_UB(out3, out2, dst2, dst + 16); - dst += dst_stride; - } -} - -static void common_hz_8t_and_aver_dst_64w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - uint32_t loop_cnt, cnt; - v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; - v16u8 dst1, dst2, mask0, mask1, mask2, mask3; - v8i16 filt, out0, out1, out2, out3; - v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8i16 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= 3; - - /* rearranging filter */ - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - for (loop_cnt = height; loop_cnt--;) { - for (cnt = 0; cnt < 2; ++cnt) { - src0 = LD_SB(&src[cnt << 5]); - src2 = LD_SB(&src[16 + (cnt << 5)]); - src3 = LD_SB(&src[24 + (cnt << 5)]); - src1 = __msa_sldi_b(src2, src0, 8); - - XORI_B4_128_SB(src0, src1, src2, src3); - VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8, - vec12); - VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, - vec13); - VSHF_B4_SH(src2, src2, mask0, mask1, mask2, mask3, vec2, vec6, - vec10, vec14); - VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, - vec11, vec15); - DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - DOTP_SB4_SH(vec8, vec9, vec10, vec11, filt2, filt2, filt2, filt2, - vec8, vec9, vec10, vec11); - DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, - vec0, vec1, 
vec2, vec3); - DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt3, filt3, filt3, filt3, - vec8, vec9, vec10, vec11); - ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, - out1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - LD_UB2(&dst[cnt << 5], 16, dst1, dst2); - PCKEV_XORI128_AVG_ST_UB(out1, out0, dst1, &dst[cnt << 5]); - PCKEV_XORI128_AVG_ST_UB(out3, out2, dst2, &dst[16 + (cnt << 5)]); - } - - src += src_stride; - dst += dst_stride; - } -} - -static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - uint32_t loop_cnt; - uint32_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16u8 dst0, out; - v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; - v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776; - v16i8 src10998, filt0, filt1, filt2, filt3; - v8i16 filt, out10, out32; - - src -= (3 * src_stride); - - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); - src += (7 * src_stride); - - ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, - src54_r, src21_r); - ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); - ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src2110, - src4332, src6554); - XORI_B3_128_SB(src2110, src4332, src6554); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src7, src8, src9, src10); - src += (4 * src_stride); - - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, - src87_r, src98_r, src109_r); - ILVR_D2_SB(src87_r, src76_r, src109_r, src98_r, src8776, src10998); - XORI_B2_128_SB(src8776, src10998); - out10 = FILT_8TAP_DPADD_S_H(src2110, src4332, src6554, src8776, filt0, - filt1, filt2, filt3); - out32 = FILT_8TAP_DPADD_S_H(src4332, src6554, src8776, src10998, filt0, - filt1, filt2, filt3); - SRARI_H2_SH(out10, out32, 7); - SAT_SH2_SH(out10, out32, 7); - out = PCKEV_XORI128_UB(out10, out32); - out = __msa_aver_u_b(out, dst0); - - ST_W4(out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); - - src2110 = src6554; - src4332 = src8776; - src6554 = src10998; - src6 = src10; - } -} - -static void common_vt_8t_and_aver_dst_8w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - uint32_t loop_cnt; - uint64_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16u8 dst0, dst1; - v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; - v16i8 src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3; - v8i16 filt, out0, out1, out2, out3; - - src -= (3 * src_stride); - - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); - src += (7 * src_stride); - - XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); - ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, - src54_r, src21_r); - ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src7, src8, src9, src10); - src += (4 * src_stride); - - LD4(dst, 
dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - XORI_B4_128_SB(src7, src8, src9, src10); - ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, - src87_r, src98_r, src109_r); - out0 = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0, - filt1, filt2, filt3); - out1 = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0, - filt1, filt2, filt3); - out2 = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0, - filt1, filt2, filt3); - out3 = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0, - filt1, filt2, filt3); - SRARI_H4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out0, out1, out2, out3, 7); - CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, - dst, dst_stride); - dst += (4 * dst_stride); - - src10_r = src54_r; - src32_r = src76_r; - src54_r = src98_r; - src21_r = src65_r; - src43_r = src87_r; - src65_r = src109_r; - src6 = src10; - } -} - -static void common_vt_8t_and_aver_dst_16w_mult_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter, - int32_t height, - int32_t width) -{ - const uint8_t *src_tmp; - uint8_t *dst_tmp; - uint32_t loop_cnt, cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; - v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l; - v16i8 src98_l, src21_l, src43_l, src65_l, src87_l, src109_l; - v16i8 filt0, filt1, filt2, filt3; - v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3; - v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l, filt; - - src -= (3 * src_stride); - - filt = LD_SH(filter); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3); - - for (cnt = (width >> 4); cnt--;) { - src_tmp = src; - dst_tmp = dst; - - LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6); - XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); - src_tmp += (7 * src_stride); - - ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, - src32_r, src54_r, src21_r); - ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); - ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, - src32_l, src54_l, src21_l); - ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src_tmp, src_stride, src7, src8, src9, src10); - src_tmp += (4 * src_stride); - - LD_UB4(dst_tmp, dst_stride, dst0, dst1, dst2, dst3); - XORI_B4_128_SB(src7, src8, src9, src10); - ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, - src87_r, src98_r, src109_r); - ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_l, - src87_l, src98_l, src109_l); - out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, - filt0, filt1, filt2, filt3); - out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, - filt0, filt1, filt2, filt3); - out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, - filt0, filt1, filt2, filt3); - out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, - filt0, filt1, filt2, filt3); - out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l, - filt0, filt1, filt2, filt3); - out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l, - filt0, filt1, filt2, filt3); - out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l, - filt0, filt1, filt2, filt3); - out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, 
src109_l, - filt0, filt1, filt2, filt3); - SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 7); - SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, 7); - SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); - SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); - PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, - out3_r, tmp0, tmp1, tmp2, tmp3); - XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); - AVER_UB4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, - dst0, dst1, dst2, dst3); - ST_UB4(dst0, dst1, dst2, dst3, dst_tmp, dst_stride); - dst_tmp += (4 * dst_stride); - - src10_r = src54_r; - src32_r = src76_r; - src54_r = src98_r; - src21_r = src65_r; - src43_r = src87_r; - src65_r = src109_r; - src10_l = src54_l; - src32_l = src76_l; - src54_l = src98_l; - src21_l = src65_l; - src43_l = src87_l; - src65_l = src109_l; - src6 = src10; - } - - src += 16; - dst += 16; - } -} - -static void common_vt_8t_and_aver_dst_16w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride, - filter, height, 16); -} - -static void common_vt_8t_and_aver_dst_32w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride, - filter, height, 32); -} - -static void common_vt_8t_and_aver_dst_64w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride, - filter, height, 64); -} - -static void common_hv_8ht_8vt_and_aver_dst_4w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - uint32_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16u8 dst0, res, mask0, mask1, mask2, mask3; - v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8i16 hz_out7, hz_out8, hz_out9, res0, res1, vec0, vec1, vec2, vec3, vec4; - v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3; - - mask0 = LD_UB(&mc_filt_mask_arr[16]); - src -= (3 + 3 * src_stride); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt_hz0, filt_hz1, filt_hz2, filt_hz3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); - XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); - src += (7 * src_stride); - - hz_out0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - SLDI_B2_SH(hz_out2, hz_out0, hz_out4, hz_out2, 8, hz_out1, hz_out3); - - filt = LD_SH(filter_vert); - SPLATI_H4_SH(filt, 0, 1, 2, 3, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - - ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - vec2 = (v8i16) __msa_ilvev_b((v16i8) hz_out5, (v16i8) hz_out4); - - for (loop_cnt = (height >> 2); 
loop_cnt--;) { - LD_SB4(src, src_stride, src7, src8, src9, src10); - XORI_B4_128_SB(src7, src8, src9, src10); - src += (4 * src_stride); - - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - hz_out6 = (v8i16) __msa_sldi_b((v16i8) hz_out7, (v16i8) hz_out5, 8); - vec3 = (v8i16) __msa_ilvev_b((v16i8) hz_out7, (v16i8) hz_out6); - res0 = FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - - hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - hz_out8 = (v8i16) __msa_sldi_b((v16i8) hz_out9, (v16i8) hz_out7, 8); - vec4 = (v8i16) __msa_ilvev_b((v16i8) hz_out9, (v16i8) hz_out8); - res1 = FILT_8TAP_DPADD_S_H(vec1, vec2, vec3, vec4, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - - SRARI_H2_SH(res0, res1, 7); - SAT_SH2_SH(res0, res1, 7); - res = PCKEV_XORI128_UB(res0, res1); - res = (v16u8) __msa_aver_u_b(res, dst0); - ST_W4(res, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); - - hz_out5 = hz_out9; - vec0 = vec2; - vec1 = vec3; - vec2 = vec4; - } -} - -static void common_hv_8ht_8vt_and_aver_dst_8w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - uint64_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; - v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3; - v16u8 dst0, dst1, mask0, mask1, mask2, mask3; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8i16 hz_out7, hz_out8, hz_out9, hz_out10, tmp0, tmp1, tmp2, tmp3; - v8i16 out0, out1, out2, out3, out4, out5, out6, out7, out8, out9; - - mask0 = LD_UB(&mc_filt_mask_arr[0]); - src -= (3 + 3 * src_stride); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - SPLATI_H4_SB(filt, 0, 1, 2, 3, filt_hz0, filt_hz1, filt_hz2, filt_hz3); - - mask1 = mask0 + 2; - mask2 = mask0 + 4; - mask3 = mask0 + 6; - - LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); - src += (7 * src_stride); - - XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); - hz_out0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - hz_out6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3, filt_hz0, - filt_hz1, filt_hz2, filt_hz3); - - filt = LD_SH(filter_vert); - SPLATI_H4_SH(filt, 0, 1, 2, 3, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - - ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); - ILVEV_B2_SH(hz_out4, hz_out5, hz_out1, hz_out2, out2, out4); - ILVEV_B2_SH(hz_out3, hz_out4, hz_out5, hz_out6, out5, out6); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src7, src8, src9, src10); - XORI_B4_128_SB(src7, src8, src9, src10); - src 
+= (4 * src_stride); - - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - - hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - out3 = (v8i16) __msa_ilvev_b((v16i8) hz_out7, (v16i8) hz_out6); - tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - - hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - out7 = (v8i16) __msa_ilvev_b((v16i8) hz_out8, (v16i8) hz_out7); - tmp1 = FILT_8TAP_DPADD_S_H(out4, out5, out6, out7, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - - hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - out8 = (v8i16) __msa_ilvev_b((v16i8) hz_out9, (v16i8) hz_out8); - tmp2 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out8, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - - hz_out10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); - out9 = (v8i16) __msa_ilvev_b((v16i8) hz_out10, (v16i8) hz_out9); - tmp3 = FILT_8TAP_DPADD_S_H(out5, out6, out7, out9, filt_vt0, filt_vt1, - filt_vt2, filt_vt3); - - SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); - CONVERT_UB_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, - dst, dst_stride); - dst += (4 * dst_stride); - - hz_out6 = hz_out10; - out0 = out2; - out1 = out3; - out2 = out8; - out4 = out6; - out5 = out7; - out6 = out9; - } -} - -static void common_hv_8ht_8vt_and_aver_dst_16w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 2; multiple8_cnt--;) { - common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert, - height); - - src += 8; - dst += 8; - } -} - -static void common_hv_8ht_8vt_and_aver_dst_32w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 4; multiple8_cnt--;) { - common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert, - height); - - src += 8; - dst += 8; - } -} - -static void common_hv_8ht_8vt_and_aver_dst_64w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 8; multiple8_cnt--;) { - common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert, - height); - - src += 8; - dst += 8; - } -} - -static void common_hz_2t_4x4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, mask; - v16u8 filt0, vec0, vec1, res0, res1; - v8u16 vec2, vec3, filt; - - mask = LD_SB(&mc_filt_mask_arr[16]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB4(src, src_stride, src0, src1, src2, src3); - VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3); - SRARI_H2_UH(vec2, vec3, 7); - PCKEV_B2_UB(vec2, vec2, vec3, vec3, res0, res1); - ST_W2(res0, 0, 1, dst, dst_stride); - ST_W2(res1, 0, 1, dst + 2 
* dst_stride, dst_stride); -} - -static void common_hz_2t_4x8_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - v16u8 vec0, vec1, vec2, vec3, filt0; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16i8 res0, res1, res2, res3; - v8u16 vec4, vec5, vec6, vec7, filt; - - mask = LD_SB(&mc_filt_mask_arr[16]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); - VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec4, vec5, vec6, vec7); - SRARI_H4_UH(vec4, vec5, vec6, vec7, 7); - PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, - res0, res1, res2, res3); - ST_W2(res0, 0, 1, dst, dst_stride); - ST_W2(res1, 0, 1, dst + 2 * dst_stride, dst_stride); - ST_W2(res2, 0, 1, dst + 4 * dst_stride, dst_stride); - ST_W2(res3, 0, 1, dst + 6 * dst_stride, dst_stride); -} - -void ff_put_bilin_4h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - - if (4 == height) { - common_hz_2t_4x4_msa(src, src_stride, dst, dst_stride, filter); - } else if (8 == height) { - common_hz_2t_4x8_msa(src, src_stride, dst, dst_stride, filter); - } -} - -static void common_hz_2t_8x4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - v16u8 filt0; - v16i8 src0, src1, src2, src3, mask; - v8u16 vec0, vec1, vec2, vec3, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB4(src, src_stride, src0, src1, src2, src3); - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - PCKEV_B2_SB(vec1, vec0, vec3, vec2, src0, src1); - ST_D4(src0, src1, 0, 1, 0, 1, dst, dst_stride); -} - -static void common_hz_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - v16u8 filt0; - v16i8 src0, src1, src2, src3, mask, out0, out1; - v8u16 vec0, vec1, vec2, vec3, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - - PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); - ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride); - - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); - ST_D4(out0, 
out1, 0, 1, 0, 1, dst + 4 * dst_stride, dst_stride); - dst += (8 * dst_stride); - - if (16 == height) { - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - - PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); - ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride); - - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); - ST_D4(out0, out1, 0, 1, 0, 1, dst + 4 * dst_stride, dst_stride); - } -} - -void ff_put_bilin_8h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - - if (4 == height) { - common_hz_2t_8x4_msa(src, src_stride, dst, dst_stride, filter); - } else { - common_hz_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, - height); - } -} - -void ff_put_bilin_16h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - loop_cnt = (height >> 2) - 1; - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); - - VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); - VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); - VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - out0, out1, out2, out3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - out4, out5, out6, out7); - SRARI_H4_UH(out0, out1, out2, out3, 7); - SRARI_H4_UH(out4, out5, out6, out7, 7); - PCKEV_ST_SB(out0, out1, dst); - dst += dst_stride; - PCKEV_ST_SB(out2, out3, dst); - dst += dst_stride; - PCKEV_ST_SB(out4, out5, dst); - dst += dst_stride; - PCKEV_ST_SB(out6, out7, dst); - dst += dst_stride; - - for (; loop_cnt--;) { - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); - - VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); - VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); - VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - out0, out1, out2, out3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - out4, out5, out6, out7); - SRARI_H4_UH(out0, out1, out2, out3, 7); - SRARI_H4_UH(out4, out5, out6, out7, 7); - PCKEV_ST_SB(out0, 
out1, dst); - dst += dst_stride; - PCKEV_ST_SB(out2, out3, dst); - dst += dst_stride; - PCKEV_ST_SB(out4, out5, dst); - dst += dst_stride; - PCKEV_ST_SB(out6, out7, dst); - dst += dst_stride; - } -} - -void ff_put_bilin_32h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - for (loop_cnt = height >> 1; loop_cnt--;) { - src0 = LD_SB(src); - src2 = LD_SB(src + 16); - src3 = LD_SB(src + 24); - src1 = __msa_sldi_b(src2, src0, 8); - src += src_stride; - src4 = LD_SB(src); - src6 = LD_SB(src + 16); - src7 = LD_SB(src + 24); - src5 = __msa_sldi_b(src6, src4, 8); - src += src_stride; - - VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); - VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); - VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - out0, out1, out2, out3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - out4, out5, out6, out7); - SRARI_H4_UH(out0, out1, out2, out3, 7); - SRARI_H4_UH(out4, out5, out6, out7, 7); - PCKEV_ST_SB(out0, out1, dst); - PCKEV_ST_SB(out2, out3, dst + 16); - dst += dst_stride; - PCKEV_ST_SB(out4, out5, dst); - PCKEV_ST_SB(out6, out7, dst + 16); - dst += dst_stride; - } -} - -void ff_put_bilin_64h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - for (loop_cnt = height; loop_cnt--;) { - src0 = LD_SB(src); - src2 = LD_SB(src + 16); - src4 = LD_SB(src + 32); - src6 = LD_SB(src + 48); - src7 = LD_SB(src + 56); - SLDI_B3_SB(src2, src0, src4, src2, src6, src4, 8, src1, src3, src5); - src += src_stride; - - VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); - VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); - VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - out0, out1, out2, out3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - out4, out5, out6, out7); - SRARI_H4_UH(out0, out1, out2, out3, 7); - SRARI_H4_UH(out4, out5, out6, out7, 7); - PCKEV_ST_SB(out0, out1, dst); - PCKEV_ST_SB(out2, out3, dst + 16); - PCKEV_ST_SB(out4, out5, dst + 32); - PCKEV_ST_SB(out6, out7, dst + 48); - dst += dst_stride; - } -} - -static void common_vt_2t_4x4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, src4; - v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332; - v16u8 filt0; - v8i16 filt; - v8u16 
tmp0, tmp1; - - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src += (5 * src_stride); - - ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, - src10_r, src21_r, src32_r, src43_r); - ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); - DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - src2110 = __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0); - ST_W4(src2110, 0, 1, 2, 3, dst, dst_stride); -} - -static void common_vt_2t_4x8_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r; - v16i8 src65_r, src87_r, src2110, src4332, src6554, src8776; - v8u16 tmp0, tmp1, tmp2, tmp3; - v16u8 filt0; - v8i16 filt; - - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - - src8 = LD_SB(src); - src += src_stride; - - ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, - src32_r, src43_r); - ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, - src76_r, src87_r); - ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, - src87_r, src76_r, src2110, src4332, src6554, src8776); - DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, src2110, src4332); - ST_W8(src2110, src4332, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride); -} - -void ff_put_bilin_4v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - - if (4 == height) { - common_vt_2t_4x4_msa(src, src_stride, dst, dst_stride, filter); - } else if (8 == height) { - common_vt_2t_4x8_msa(src, src_stride, dst, dst_stride, filter); - } -} - -static void common_vt_2t_8x4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - v16u8 src0, src1, src2, src3, src4, vec0, vec1, vec2, vec3, filt0; - v16i8 out0, out1; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - /* rearranging filter_y */ - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - LD_UB5(src, src_stride, src0, src1, src2, src3, src4); - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1); - ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); - ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride); -} - -static void common_vt_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; - v16i8 out0, out1; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - /* rearranging filter_y */ - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - src0 = LD_UB(src); - src += src_stride; - - for (loop_cnt = (height >> 3); 
loop_cnt--;) { - LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); - src += (8 * src_stride); - - ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, - vec0, vec1, vec2, vec3); - ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, - vec4, vec5, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); - ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride); - - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); - ST_D4(out0, out1, 0, 1, 0, 1, dst + 4 * dst_stride, dst_stride); - dst += (8 * dst_stride); - - src0 = src8; - } -} - -void ff_put_bilin_8v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - - if (4 == height) { - common_vt_2t_8x4_msa(src, src_stride, dst, dst_stride, filter); - } else { - common_vt_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, - height); - } -} - -void ff_put_bilin_16v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - v16u8 src0, src1, src2, src3, src4; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - /* rearranging filter_y */ - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - src0 = LD_UB(src); - src += src_stride; - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_UB4(src, src_stride, src1, src2, src3, src4); - src += (4 * src_stride); - - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); - ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_ST_SB(tmp0, tmp1, dst); - dst += dst_stride; - - ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6); - ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7); - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_ST_SB(tmp2, tmp3, dst); - dst += dst_stride; - - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_ST_SB(tmp0, tmp1, dst); - dst += dst_stride; - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_ST_SB(tmp2, tmp3, dst); - dst += dst_stride; - - src0 = src4; - } -} - -void ff_put_bilin_32v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - /* rearranging filter_y */ - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - src0 = LD_UB(src); - src5 = LD_UB(src + 16); - src += src_stride; - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_UB4(src, src_stride, src1, src2, src3, src4); - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); 
- ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3); - - LD_UB4(src + 16, src_stride, src6, src7, src8, src9); - src += (4 * src_stride); - - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_ST_SB(tmp0, tmp1, dst); - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_ST_SB(tmp2, tmp3, dst + dst_stride); - - ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6); - ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7); - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_ST_SB(tmp0, tmp1, dst + 2 * dst_stride); - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_ST_SB(tmp2, tmp3, dst + 3 * dst_stride); - - ILVR_B2_UB(src6, src5, src7, src6, vec0, vec2); - ILVL_B2_UB(src6, src5, src7, src6, vec1, vec3); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_ST_SB(tmp0, tmp1, dst + 16); - - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_ST_SB(tmp2, tmp3, dst + 16 + dst_stride); - - ILVR_B2_UB(src8, src7, src9, src8, vec4, vec6); - ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7); - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_ST_SB(tmp0, tmp1, dst + 16 + 2 * dst_stride); - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_ST_SB(tmp2, tmp3, dst + 16 + 3 * dst_stride); - dst += (4 * dst_stride); - - src0 = src4; - src5 = src9; - } -} - -void ff_put_bilin_64v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; - v16u8 src11, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; - v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - v8i16 filt; - - /* rearranging filter_y */ - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - LD_UB4(src, 16, src0, src3, src6, src9); - src += src_stride; - - for (loop_cnt = (height >> 1); loop_cnt--;) { - LD_UB2(src, src_stride, src1, src2); - LD_UB2(src + 16, src_stride, src4, src5); - LD_UB2(src + 32, src_stride, src7, src8); - LD_UB2(src + 48, src_stride, src10, src11); - src += (2 * src_stride); - - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); - ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_ST_SB(tmp0, tmp1, dst); - - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_ST_SB(tmp2, tmp3, dst + dst_stride); - - ILVR_B2_UB(src4, src3, src5, src4, vec4, vec6); - ILVL_B2_UB(src4, src3, src5, src4, vec5, vec7); - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5); - SRARI_H2_UH(tmp4, tmp5, 7); - SAT_UH2_UH(tmp4, tmp5, 7); - PCKEV_ST_SB(tmp4, tmp5, dst + 16); - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7); - SRARI_H2_UH(tmp6, tmp7, 7); - SAT_UH2_UH(tmp6, tmp7, 7); - PCKEV_ST_SB(tmp6, tmp7, dst + 16 + dst_stride); - - ILVR_B2_UB(src7, src6, src8, src7, vec0, vec2); - ILVL_B2_UB(src7, src6, src8, src7, vec1, vec3); 
- DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_ST_SB(tmp0, tmp1, dst + 32); - - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_ST_SB(tmp2, tmp3, dst + 32 + dst_stride); - - ILVR_B2_UB(src10, src9, src11, src10, vec4, vec6); - ILVL_B2_UB(src10, src9, src11, src10, vec5, vec7); - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5); - SRARI_H2_UH(tmp4, tmp5, 7); - SAT_UH2_UH(tmp4, tmp5, 7); - PCKEV_ST_SB(tmp4, tmp5, dst + 48); - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7); - SRARI_H2_UH(tmp6, tmp7, 7); - SAT_UH2_UH(tmp6, tmp7, 7); - PCKEV_ST_SB(tmp6, tmp7, dst + 48 + dst_stride); - dst += (2 * dst_stride); - - src0 = src2; - src3 = src5; - src6 = src8; - src9 = src11; - } -} - -static void common_hv_2ht_2vt_4x4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter_horiz, const int8_t *filter_vert) -{ - v16i8 src0, src1, src2, src3, src4, mask; - v16u8 filt_vt, filt_hz, vec0, vec1, res0, res1; - v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, filt, tmp0, tmp1; - - mask = LD_SB(&mc_filt_mask_arr[16]); - - /* rearranging filter */ - filt = LD_UH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h((v8i16) filt, 0); - - filt = LD_UH(filter_vert); - filt_vt = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, 7); - hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7); - hz_out1 = (v8u16) __msa_sldi_b((v16i8) hz_out2, (v16i8) hz_out0, 8); - hz_out3 = (v8u16) __msa_pckod_d((v2i64) hz_out4, (v2i64) hz_out2); - - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1); - ST_W2(res0, 0, 1, dst, dst_stride); - ST_W2(res1, 0, 1, dst + 2 * dst_stride, dst_stride); -} - -static void common_hv_2ht_2vt_4x8_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter_horiz, const int8_t *filter_vert) -{ - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask; - v16i8 res0, res1, res2, res3; - v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3; - v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8u16 hz_out7, hz_out8, vec4, vec5, vec6, vec7, filt; - - mask = LD_SB(&mc_filt_mask_arr[16]); - - /* rearranging filter */ - filt = LD_UH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h((v8i16) filt, 0); - - filt = LD_UH(filter_vert); - filt_vt = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - src8 = LD_SB(src); - - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, 7); - hz_out4 = HORIZ_2TAP_FILT_UH(src4, src5, mask, filt_hz, 7); - hz_out6 = HORIZ_2TAP_FILT_UH(src6, src7, mask, filt_hz, 7); - hz_out8 = HORIZ_2TAP_FILT_UH(src8, src8, mask, filt_hz, 7); - SLDI_B3_UH(hz_out2, hz_out0, hz_out4, hz_out2, hz_out6, hz_out4, 8, hz_out1, - hz_out3, hz_out5); - hz_out7 = (v8u16) __msa_pckod_d((v2i64) hz_out8, (v2i64) hz_out6); - - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3); - 
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, - vec4, vec5, vec6, vec7); - SRARI_H4_UH(vec4, vec5, vec6, vec7, 7); - SAT_UH4_UH(vec4, vec5, vec6, vec7, 7); - PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, - res0, res1, res2, res3); - ST_W2(res0, 0, 1, dst, dst_stride); - ST_W2(res1, 0, 1, dst + 2 * dst_stride, dst_stride); - ST_W2(res2, 0, 1, dst + 4 * dst_stride, dst_stride); - ST_W2(res3, 0, 1, dst + 6 * dst_stride, dst_stride); -} - -void ff_put_bilin_4hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter_horiz = vp9_bilinear_filters_msa[mx - 1]; - const int8_t *filter_vert = vp9_bilinear_filters_msa[my - 1]; - - if (4 == height) { - common_hv_2ht_2vt_4x4_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert); - } else if (8 == height) { - common_hv_2ht_2vt_4x8_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert); - } -} - -static void common_hv_2ht_2vt_8x4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter_horiz, const int8_t *filter_vert) -{ - v16i8 src0, src1, src2, src3, src4, mask, out0, out1; - v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3; - v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h(filt, 0); - - filt = LD_SH(filter_vert); - filt_vt = (v16u8) __msa_splati_h(filt, 0); - - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7); - hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp0 = __msa_dotp_u_h(vec0, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7); - vec1 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp1 = __msa_dotp_u_h(vec1, filt_vt); - - hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7); - vec2 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp2 = __msa_dotp_u_h(vec2, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7); - vec3 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp3 = __msa_dotp_u_h(vec3, filt_vt); - - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); - ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride); -} - -static void common_hv_2ht_2vt_8x8mult_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, mask, out0, out1; - v16u8 filt_hz, filt_vt, vec0; - v8u16 hz_out0, hz_out1, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; - v8i16 filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h(filt, 0); - - filt = LD_SH(filter_vert); - filt_vt = (v16u8) __msa_splati_h(filt, 0); - - src0 = LD_SB(src); - src += src_stride; - - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7); - - for (loop_cnt = (height >> 3); loop_cnt--;) { - LD_SB4(src, src_stride, src1, src2, src3, src4); - src += (4 * src_stride); - - hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp1 = 
__msa_dotp_u_h(vec0, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp2 = __msa_dotp_u_h(vec0, filt_vt); - - SRARI_H2_UH(tmp1, tmp2, 7); - SAT_UH2_UH(tmp1, tmp2, 7); - - hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp3 = __msa_dotp_u_h(vec0, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7); - LD_SB4(src, src_stride, src1, src2, src3, src4); - src += (4 * src_stride); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp4 = __msa_dotp_u_h(vec0, filt_vt); - - SRARI_H2_UH(tmp3, tmp4, 7); - SAT_UH2_UH(tmp3, tmp4, 7); - PCKEV_B2_SB(tmp2, tmp1, tmp4, tmp3, out0, out1); - ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride); - - hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp5 = __msa_dotp_u_h(vec0, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp6 = __msa_dotp_u_h(vec0, filt_vt); - - hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp7 = __msa_dotp_u_h(vec0, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp8 = __msa_dotp_u_h(vec0, filt_vt); - - SRARI_H4_UH(tmp5, tmp6, tmp7, tmp8, 7); - SAT_UH4_UH(tmp5, tmp6, tmp7, tmp8, 7); - PCKEV_B2_SB(tmp6, tmp5, tmp8, tmp7, out0, out1); - ST_D4(out0, out1, 0, 1, 0, 1, dst + 4 * dst_stride, dst_stride); - dst += (8 * dst_stride); - } -} - -void ff_put_bilin_8hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter_horiz = vp9_bilinear_filters_msa[mx - 1]; - const int8_t *filter_vert = vp9_bilinear_filters_msa[my - 1]; - - if (4 == height) { - common_hv_2ht_2vt_8x4_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert); - } else { - common_hv_2ht_2vt_8x8mult_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert, height); - } -} - -void ff_put_bilin_16hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter_horiz = vp9_bilinear_filters_msa[mx - 1]; - const int8_t *filter_vert = vp9_bilinear_filters_msa[my - 1]; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt_hz, filt_vt, vec0, vec1; - v8u16 tmp1, tmp2, hz_out0, hz_out1, hz_out2, hz_out3; - v8i16 filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h(filt, 0); - - filt = LD_SH(filter_vert); - filt_vt = (v16u8) __msa_splati_h(filt, 0); - - LD_SB2(src, 8, src0, src1); - src += src_stride; - - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7); - - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); - - hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7); - hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7); - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, 
tmp2); - SRARI_H2_UH(tmp1, tmp2, 7); - SAT_UH2_UH(tmp1, tmp2, 7); - PCKEV_ST_SB(tmp1, tmp2, dst); - dst += dst_stride; - - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7); - ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); - SRARI_H2_UH(tmp1, tmp2, 7); - SAT_UH2_UH(tmp1, tmp2, 7); - PCKEV_ST_SB(tmp1, tmp2, dst); - dst += dst_stride; - - hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7); - hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, 7); - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); - SRARI_H2_UH(tmp1, tmp2, 7); - SAT_UH2_UH(tmp1, tmp2, 7); - PCKEV_ST_SB(tmp1, tmp2, dst); - dst += dst_stride; - - hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, 7); - ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); - SRARI_H2_UH(tmp1, tmp2, 7); - SAT_UH2_UH(tmp1, tmp2, 7); - PCKEV_ST_SB(tmp1, tmp2, dst); - dst += dst_stride; - } -} - -void ff_put_bilin_32hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 2; multiple8_cnt--;) { - ff_put_bilin_16hv_msa(dst, dst_stride, src, src_stride, height, mx, my); - - src += 16; - dst += 16; - } -} - -void ff_put_bilin_64hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 4; multiple8_cnt--;) { - ff_put_bilin_16hv_msa(dst, dst_stride, src, src_stride, height, mx, my); - - src += 16; - dst += 16; - } -} - -static void common_hz_2t_and_aver_dst_4x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - uint32_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, mask; - v16u8 filt0, dst0, vec0, vec1, res; - v8u16 vec2, vec3, filt; - - mask = LD_SB(&mc_filt_mask_arr[16]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB4(src, src_stride, src0, src1, src2, src3); - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3); - SRARI_H2_UH(vec2, vec3, 7); - - res = (v16u8) __msa_pckev_b((v16i8) vec3, (v16i8) vec2); - res = (v16u8) __msa_aver_u_b(res, dst0); - - ST_W4(res, 0, 1, 2, 3, dst, dst_stride); -} - -static void common_hz_2t_and_aver_dst_4x8_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - uint32_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt0, vec0, vec1, vec2, vec3, res0, res1, res2, res3; - v16u8 dst0, dst1; - v8u16 vec4, vec5, vec6, vec7, filt; - - mask = LD_SB(&mc_filt_mask_arr[16]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - LW4(dst + 4 * dst_stride, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1); - VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, 
vec1); - VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5, - vec6, vec7); - SRARI_H4_UH(vec4, vec5, vec6, vec7, 7); - PCKEV_B4_UB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, - res2, res3); - ILVR_D2_UB(res1, res0, res3, res2, res0, res2); - AVER_UB2_UB(res0, dst0, res2, dst1, res0, res2); - ST_W8(res0, res2, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride); -} - -void ff_avg_bilin_4h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - - if (4 == height) { - common_hz_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, - filter); - } else if (8 == height) { - common_hz_2t_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, - filter); - } -} - -static void common_hz_2t_and_aver_dst_8x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - int64_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, mask; - v16u8 filt0, dst0, dst1; - v8u16 vec0, vec1, vec2, vec3, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB4(src, src_stride, src0, src1, src2, src3); - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - PCKEV_AVG_ST8x4_UB(vec0, vec1, vec2, vec3, dst0, dst1, dst, dst_stride); -} - -static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - int64_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, mask; - v16u8 filt0, dst0, dst1; - v8u16 vec0, vec1, vec2, vec3, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, - vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - PCKEV_AVG_ST8x4_UB(vec0, vec1, vec2, vec3, dst0, dst1, dst, dst_stride); - dst += (4 * dst_stride); - - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, - vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - PCKEV_AVG_ST8x4_UB(vec0, vec1, vec2, vec3, dst0, dst1, dst, dst_stride); - dst += (4 * dst_stride); - - if (16 == height) { - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, 
src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, - vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - LD_SB4(src, src_stride, src0, src1, src2, src3); - PCKEV_AVG_ST8x4_UB(vec0, vec1, vec2, vec3, dst0, dst1, dst, dst_stride); - dst += (4 * dst_stride); - - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, - vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, 7); - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - PCKEV_AVG_ST8x4_UB(vec0, vec1, vec2, vec3, dst0, dst1, dst, dst_stride); - } -} - -void ff_avg_bilin_8h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - - if (4 == height) { - common_hz_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, - filter); - } else { - common_hz_2t_and_aver_dst_8x8mult_msa(src, src_stride, dst, dst_stride, - filter, height); - } -} - -void ff_avg_bilin_16h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt0, dst0, dst1, dst2, dst3; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 res0, res1, res2, res3, res4, res5, res6, res7, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); - - VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); - VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); - VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, res0, res1, - res2, res3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, res4, res5, - res6, res7); - SRARI_H4_UH(res0, res1, res2, res3, 7); - SRARI_H4_UH(res4, res5, res6, res7, 7); - LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - PCKEV_AVG_ST_UB(res1, res0, dst0, dst); - dst += dst_stride; - PCKEV_AVG_ST_UB(res3, res2, dst1, dst); - dst += dst_stride; - PCKEV_AVG_ST_UB(res5, res4, dst2, dst); - dst += dst_stride; - PCKEV_AVG_ST_UB(res7, res6, dst3, dst); - dst += dst_stride; - - for (loop_cnt = (height >> 2) - 1; loop_cnt--;) { - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); - - VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); - VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); - VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, res0, - res1, res2, res3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, res4, - res5, res6, res7); - SRARI_H4_UH(res0, res1, res2, res3, 7); - SRARI_H4_UH(res4, res5, res6, res7, 7); - 
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - PCKEV_AVG_ST_UB(res1, res0, dst0, dst); - dst += dst_stride; - PCKEV_AVG_ST_UB(res3, res2, dst1, dst); - dst += dst_stride; - PCKEV_AVG_ST_UB(res5, res4, dst2, dst); - dst += dst_stride; - PCKEV_AVG_ST_UB(res7, res6, dst3, dst); - dst += dst_stride; - } -} - -void ff_avg_bilin_32h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt0, dst0, dst1, dst2, dst3; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 res0, res1, res2, res3, res4, res5, res6, res7, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - for (loop_cnt = (height >> 1); loop_cnt--;) { - src0 = LD_SB(src); - src2 = LD_SB(src + 16); - src3 = LD_SB(src + 24); - src1 = __msa_sldi_b(src2, src0, 8); - src += src_stride; - src4 = LD_SB(src); - src6 = LD_SB(src + 16); - src7 = LD_SB(src + 24); - src5 = __msa_sldi_b(src6, src4, 8); - src += src_stride; - - VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); - VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); - VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - res0, res1, res2, res3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - res4, res5, res6, res7); - SRARI_H4_UH(res0, res1, res2, res3, 7); - SRARI_H4_UH(res4, res5, res6, res7, 7); - LD_UB2(dst, 16, dst0, dst1); - PCKEV_AVG_ST_UB(res1, res0, dst0, dst); - PCKEV_AVG_ST_UB(res3, res2, dst1, (dst + 16)); - dst += dst_stride; - LD_UB2(dst, 16, dst2, dst3); - PCKEV_AVG_ST_UB(res5, res4, dst2, dst); - PCKEV_AVG_ST_UB(res7, res6, dst3, (dst + 16)); - dst += dst_stride; - } -} - -void ff_avg_bilin_64h_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[mx - 1]; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt0, dst0, dst1, dst2, dst3; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - for (loop_cnt = height; loop_cnt--;) { - LD_SB4(src, 16, src0, src2, src4, src6); - src7 = LD_SB(src + 56); - SLDI_B3_SB(src2, src0, src4, src2, src6, src4, 8, src1, src3, src5); - src += src_stride; - - VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); - VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); - VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - out0, out1, out2, out3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - out4, out5, out6, out7); - SRARI_H4_UH(out0, out1, out2, out3, 7); - SRARI_H4_UH(out4, out5, out6, out7, 7); - LD_UB4(dst, 16, dst0, dst1, dst2, dst3); - PCKEV_AVG_ST_UB(out1, out0, dst0, dst); - PCKEV_AVG_ST_UB(out3, out2, dst1, dst + 16); - PCKEV_AVG_ST_UB(out5, out4, dst2, dst + 32); - PCKEV_AVG_ST_UB(out7, out6, dst3, dst + 48); 
- dst += dst_stride; - } -} - -static void common_vt_2t_and_aver_dst_4x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - uint32_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4; - v16u8 dst0, out, filt0, src2110, src4332; - v16i8 src10_r, src32_r, src21_r, src43_r; - v8i16 filt; - v8u16 tmp0, tmp1; - - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - - src4 = LD_SB(src); - src += src_stride; - - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, - src10_r, src21_r, src32_r, src43_r); - ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); - DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - - out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0); - out = __msa_aver_u_b(out, dst0); - - ST_W4(out, 0, 1, 2, 3, dst, dst_stride); -} - -static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - const int8_t *filter) -{ - uint32_t tp0, tp1, tp2, tp3; - v16u8 dst0, dst1; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src87_r; - v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; - v16u8 src2110, src4332, src6554, src8776, filt0; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - src8 = LD_SB(src); - - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - LW4(dst + 4 * dst_stride, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1); - ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, - src32_r, src43_r); - ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, - src76_r, src87_r); - ILVR_D4_UB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, - src87_r, src76_r, src2110, src4332, src6554, src8776); - DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src2110, src4332); - AVER_UB2_UB(src2110, dst0, src4332, dst1, src2110, src4332); - ST_W8(src2110, src4332, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride); -} - -void ff_avg_bilin_4v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - - if (4 == height) { - common_vt_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, - filter); - } else if (8 == height) { - common_vt_2t_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, - filter); - } -} - -static void common_vt_2t_and_aver_dst_8x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter) -{ - int64_t tp0, tp1, tp2, tp3; - v16u8 src0, src1, src2, src3, src4; - v16u8 dst0, dst1, vec0, vec1, vec2, vec3, filt0; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - /* rearranging filter_y */ - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - LD_UB5(src, src_stride, src0, src1, src2, src3, src4); - LD4(dst, dst_stride, tp0, tp1, tp2, 
tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1); - ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst, dst_stride); -} - -static void common_vt_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter, - int32_t height) -{ - uint32_t loop_cnt; - int64_t tp0, tp1, tp2, tp3; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16u8 dst0, dst1, dst2, dst3; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - /* rearranging filter_y */ - filt = LD_SH(filter); - filt0 = (v16u8) __msa_splati_h(filt, 0); - - src0 = LD_UB(src); - src += src_stride; - - for (loop_cnt = (height >> 3); loop_cnt--;) { - LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); - src += (8 * src_stride); - - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - LD4(dst + 4 * dst_stride, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst2); - INSERT_D2_UB(tp2, tp3, dst3); - - ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, - vec0, vec1, vec2, vec3); - ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, - vec4, vec5, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst, dst_stride); - dst += (4 * dst_stride); - - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst2, dst3, dst, dst_stride); - dst += (4 * dst_stride); - - src0 = src8; - } -} - -void ff_avg_bilin_8v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - - if (4 == height) { - common_vt_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, - filter); - } else { - common_vt_2t_and_aver_dst_8x8mult_msa(src, src_stride, dst, dst_stride, - filter, height); - } -} - -void ff_avg_bilin_16v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - v16u8 src0, src1, src2, src3, src4, dst0, dst1, dst2, dst3, filt0; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 tmp0, tmp1, tmp2, tmp3, filt; - - /* rearranging filter_y */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - src0 = LD_UB(src); - src += src_stride; - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_UB4(src, src_stride, src1, src2, src3, src4); - src += (4 * src_stride); - - LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); - ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst); - dst += dst_stride; - - 
ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6); - ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7); - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst); - dst += dst_stride; - - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst); - dst += dst_stride; - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_AVG_ST_UB(tmp3, tmp2, dst3, dst); - dst += dst_stride; - - src0 = src4; - } -} - -void ff_avg_bilin_32v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9; - v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; - v8u16 tmp0, tmp1, tmp2, tmp3, filt; - - /* rearranging filter_y */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_UB2(src, 16, src0, src5); - src += src_stride; - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_UB4(src, src_stride, src1, src2, src3, src4); - LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); - ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3); - - LD_UB4(src + 16, src_stride, src6, src7, src8, src9); - LD_UB4(dst + 16, dst_stride, dst4, dst5, dst6, dst7); - src += (4 * src_stride); - - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst); - - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst + dst_stride); - - ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6); - ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7); - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst + 2 * dst_stride); - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_AVG_ST_UB(tmp3, tmp2, dst3, dst + 3 * dst_stride); - - ILVR_B2_UB(src6, src5, src7, src6, vec0, vec2); - ILVL_B2_UB(src6, src5, src7, src6, vec1, vec3); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst4, dst + 16); - - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_AVG_ST_UB(tmp3, tmp2, dst5, dst + 16 + dst_stride); - - ILVR_B2_UB(src8, src7, src9, src8, vec4, vec6); - ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7); - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst6, dst + 16 + 2 * dst_stride); - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_AVG_ST_UB(tmp3, tmp2, dst7, dst + 16 + 3 * dst_stride); - dst += (4 * dst_stride); - - src0 = src4; - src5 = src9; - } -} - -void ff_avg_bilin_64v_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const 
int8_t *filter = vp9_bilinear_filters_msa[my - 1]; - v16u8 src0, src1, src2, src3, src4, src5; - v16u8 src6, src7, src8, src9, src10, src11, filt0; - v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - v8u16 filt; - - /* rearranging filter_y */ - filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_UB4(src, 16, src0, src3, src6, src9); - src += src_stride; - - for (loop_cnt = (height >> 1); loop_cnt--;) { - LD_UB2(src, src_stride, src1, src2); - LD_UB2(dst, dst_stride, dst0, dst1); - LD_UB2(src + 16, src_stride, src4, src5); - LD_UB2(dst + 16, dst_stride, dst2, dst3); - LD_UB2(src + 32, src_stride, src7, src8); - LD_UB2(dst + 32, dst_stride, dst4, dst5); - LD_UB2(src + 48, src_stride, src10, src11); - LD_UB2(dst + 48, dst_stride, dst6, dst7); - src += (2 * src_stride); - - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); - ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst); - - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst + dst_stride); - - ILVR_B2_UB(src4, src3, src5, src4, vec4, vec6); - ILVL_B2_UB(src4, src3, src5, src4, vec5, vec7); - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5); - SRARI_H2_UH(tmp4, tmp5, 7); - SAT_UH2_UH(tmp4, tmp5, 7); - PCKEV_AVG_ST_UB(tmp5, tmp4, dst2, dst + 16); - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7); - SRARI_H2_UH(tmp6, tmp7, 7); - SAT_UH2_UH(tmp6, tmp7, 7); - PCKEV_AVG_ST_UB(tmp7, tmp6, dst3, dst + 16 + dst_stride); - - ILVR_B2_UB(src7, src6, src8, src7, vec0, vec2); - ILVL_B2_UB(src7, src6, src8, src7, vec1, vec3); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst4, dst + 32); - - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - PCKEV_AVG_ST_UB(tmp3, tmp2, dst5, dst + 32 + dst_stride); - - ILVR_B2_UB(src10, src9, src11, src10, vec4, vec6); - ILVL_B2_UB(src10, src9, src11, src10, vec5, vec7); - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5); - SRARI_H2_UH(tmp4, tmp5, 7); - SAT_UH2_UH(tmp4, tmp5, 7); - PCKEV_AVG_ST_UB(tmp5, tmp4, dst6, (dst + 48)); - - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7); - SRARI_H2_UH(tmp6, tmp7, 7); - SAT_UH2_UH(tmp6, tmp7, 7); - PCKEV_AVG_ST_UB(tmp7, tmp6, dst7, dst + 48 + dst_stride); - dst += (2 * dst_stride); - - src0 = src2; - src3 = src5; - src6 = src8; - src9 = src11; - } -} - -static void common_hv_2ht_2vt_and_aver_dst_4x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert) -{ - uint32_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4, mask; - v16u8 filt_hz, filt_vt, vec0, vec1; - v16u8 dst0, out; - v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, tmp0, tmp1, filt; - - mask = LD_SB(&mc_filt_mask_arr[16]); - - /* rearranging filter */ - filt = LD_UH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h((v8i16) filt, 0); - - filt = LD_UH(filter_vert); - filt_vt = (v16u8) __msa_splati_h((v8i16) filt, 0); - - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src2, 
src3, mask, filt_hz, 7); - hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7); - hz_out1 = (v8u16) __msa_sldi_b((v16i8) hz_out2, (v16i8) hz_out0, 8); - hz_out3 = (v8u16) __msa_pckod_d((v2i64) hz_out4, (v2i64) hz_out2); - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - - out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0); - out = __msa_aver_u_b(out, dst0); - - ST_W4(out, 0, 1, 2, 3, dst, dst_stride); -} - -static void common_hv_2ht_2vt_and_aver_dst_4x8_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert) -{ - uint32_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask; - v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3, res0, res1; - v16u8 dst0, dst1; - v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8u16 hz_out7, hz_out8, tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - mask = LD_SB(&mc_filt_mask_arr[16]); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h(filt, 0); - - filt = LD_SH(filter_vert); - filt_vt = (v16u8) __msa_splati_h(filt, 0); - - LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - src8 = LD_SB(src); - - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, 7); - hz_out4 = HORIZ_2TAP_FILT_UH(src4, src5, mask, filt_hz, 7); - hz_out6 = HORIZ_2TAP_FILT_UH(src6, src7, mask, filt_hz, 7); - hz_out8 = HORIZ_2TAP_FILT_UH(src8, src8, mask, filt_hz, 7); - SLDI_B3_UH(hz_out2, hz_out0, hz_out4, hz_out2, hz_out6, hz_out4, 8, hz_out1, - hz_out3, hz_out5); - hz_out7 = (v8u16) __msa_pckod_d((v2i64) hz_out8, (v2i64) hz_out6); - - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - LW4(dst + 4 * dst_stride, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1); - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, res0, res1); - AVER_UB2_UB(res0, dst0, res1, dst1, res0, res1); - ST_W8(res0, res1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride); -} - -void ff_avg_bilin_4hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter_horiz = vp9_bilinear_filters_msa[mx - 1]; - const int8_t *filter_vert = vp9_bilinear_filters_msa[my - 1]; - - if (4 == height) { - common_hv_2ht_2vt_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert); - } else if (8 == height) { - common_hv_2ht_2vt_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert); - } -} - -static void common_hv_2ht_2vt_and_aver_dst_8x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert) -{ - uint64_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4, mask; - v16u8 filt_hz, filt_vt, dst0, dst1, vec0, vec1, vec2, vec3; - v8u16 hz_out0, hz_out1, tmp0, 
tmp1, tmp2, tmp3; - v8i16 filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h(filt, 0); - - filt = LD_SH(filter_vert); - filt_vt = (v16u8) __msa_splati_h(filt, 0); - - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src += (5 * src_stride); - - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7); - hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp0 = __msa_dotp_u_h(vec0, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7); - vec1 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp1 = __msa_dotp_u_h(vec1, filt_vt); - - hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7); - vec2 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp2 = __msa_dotp_u_h(vec2, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7); - vec3 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp3 = __msa_dotp_u_h(vec3, filt_vt); - - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7); - PCKEV_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst, dst_stride); -} - -static void common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - uint64_t tp0, tp1, tp2, tp3; - v16i8 src0, src1, src2, src3, src4, mask; - v16u8 filt_hz, filt_vt, vec0, dst0, dst1; - v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3; - v8i16 filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h(filt, 0); - - filt = LD_SH(filter_vert); - filt_vt = (v16u8) __msa_splati_h(filt, 0); - - src0 = LD_SB(src); - src += src_stride; - - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src1, src2, src3, src4); - src += (4 * src_stride); - - hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp0 = __msa_dotp_u_h(vec0, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp1 = __msa_dotp_u_h(vec0, filt_vt); - - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - - hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0); - tmp2 = __msa_dotp_u_h(vec0, filt_vt); - - hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7); - vec0 = (v16u8) __msa_ilvev_b((v16i8) hz_out0, (v16i8) hz_out1); - tmp3 = __msa_dotp_u_h(vec0, filt_vt); - - SRARI_H2_UH(tmp2, tmp3, 7); - SAT_UH2_UH(tmp2, tmp3, 7); - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - PCKEV_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst, dst_stride); - dst += (4 * dst_stride); - } -} - -void ff_avg_bilin_8hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - const int8_t *filter_horiz = vp9_bilinear_filters_msa[mx - 1]; - const int8_t *filter_vert = vp9_bilinear_filters_msa[my - 1]; - - if (4 == height) { - 
common_hv_2ht_2vt_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert); - } else { - common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(src, src_stride, - dst, dst_stride, - filter_horiz, filter_vert, - height); - } -} - -void ff_avg_bilin_16hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - uint32_t loop_cnt; - const int8_t *filter_horiz = vp9_bilinear_filters_msa[mx - 1]; - const int8_t *filter_vert = vp9_bilinear_filters_msa[my - 1]; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt_hz, filt_vt, vec0, vec1, dst0, dst1, dst2, dst3; - v8u16 hz_out0, hz_out1, hz_out2, hz_out3, tmp0, tmp1; - v8i16 filt; - - mask = LD_SB(&mc_filt_mask_arr[0]); - - /* rearranging filter */ - filt = LD_SH(filter_horiz); - filt_hz = (v16u8) __msa_splati_h(filt, 0); - - filt = LD_SH(filter_vert); - filt_vt = (v16u8) __msa_splati_h(filt, 0); - - LD_SB2(src, 8, src0, src1); - src += src_stride; - - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7); - - for (loop_cnt = (height >> 2); loop_cnt--;) { - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); - LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - - hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 7); - hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, 7); - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst); - dst += dst_stride; - - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, 7); - ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst1, dst); - dst += dst_stride; - - hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, 7); - hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, 7); - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst); - dst += dst_stride; - - hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, 7); - hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, 7); - ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, 7); - SAT_UH2_UH(tmp0, tmp1, 7); - PCKEV_AVG_ST_UB(tmp1, tmp0, dst3, dst); - dst += dst_stride; - } -} - -void ff_avg_bilin_32hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 2; multiple8_cnt--;) { - ff_avg_bilin_16hv_msa(dst, dst_stride, src, src_stride, height, mx, my); - - src += 16; - dst += 16; - } -} - -void ff_avg_bilin_64hv_msa(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src, ptrdiff_t src_stride, - int height, int mx, int my) -{ - int32_t multiple8_cnt; - - for (multiple8_cnt = 4; multiple8_cnt--;) { - ff_avg_bilin_16hv_msa(dst, dst_stride, src, src_stride, height, mx, my); - - src += 16; - dst += 16; - } -} - -static void copy_width8_msa(const uint8_t *src, int32_t 
src_stride, - uint8_t *dst, int32_t dst_stride, - int32_t height) -{ - int32_t cnt; - uint64_t out0, out1, out2, out3, out4, out5, out6, out7; - - if (0 == height % 8) { - for (cnt = height >> 3; cnt--;) { - LD4(src, src_stride, out0, out1, out2, out3); - src += (4 * src_stride); - LD4(src, src_stride, out4, out5, out6, out7); - src += (4 * src_stride); - - SD4(out0, out1, out2, out3, dst, dst_stride); - dst += (4 * dst_stride); - SD4(out4, out5, out6, out7, dst, dst_stride); - dst += (4 * dst_stride); - } - } else if (0 == height % 4) { - for (cnt = (height / 4); cnt--;) { - LD4(src, src_stride, out0, out1, out2, out3); - src += (4 * src_stride); - - SD4(out0, out1, out2, out3, dst, dst_stride); - dst += (4 * dst_stride); - } - } -} - -static void copy_width16_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - int32_t height) -{ - int32_t cnt; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7; - - if (8 == height) { - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); - } else if (16 == height) { - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); - dst += (8 * dst_stride); - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); - dst += (8 * dst_stride); - } else if (32 == height) { - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); - dst += (8 * dst_stride); - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); - dst += (8 * dst_stride); - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); - dst += (8 * dst_stride); - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); - } else if (0 == height % 4) { - for (cnt = (height >> 2); cnt--;) { - LD_UB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - ST_UB4(src0, src1, src2, src3, dst, dst_stride); - dst += (4 * dst_stride); - } - } -} - -static void copy_width32_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - int32_t height) -{ - int32_t cnt; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7; - - if (0 == height % 8) { - for (cnt = (height >> 3); cnt--;) { - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); - LD_UB8(src + 16, src_stride, src0, src1, src2, src3, src4, src5, src6, - src7); - src += (8 * src_stride); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst + 16, - dst_stride); - dst += (8 * dst_stride); - } - } else if (0 == height % 4) { - for (cnt = (height >> 2); cnt--;) { - LD_UB4(src, src_stride, src0, src1, src2, src3); - LD_UB4(src + 16, src_stride, src4, src5, src6, src7); - src += (4 * src_stride); - ST_UB4(src0, src1, src2, src3, dst, dst_stride); - ST_UB4(src4, src5, src6, src7, dst + 16, dst_stride); - dst += (4 * 
dst_stride); - } - } -} - -static void copy_width64_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - int32_t height) -{ - int32_t cnt; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7; - v16u8 src8, src9, src10, src11, src12, src13, src14, src15; - - for (cnt = (height >> 2); cnt--;) { - LD_UB4(src, 16, src0, src1, src2, src3); - src += src_stride; - LD_UB4(src, 16, src4, src5, src6, src7); - src += src_stride; - LD_UB4(src, 16, src8, src9, src10, src11); - src += src_stride; - LD_UB4(src, 16, src12, src13, src14, src15); - src += src_stride; - - ST_UB4(src0, src1, src2, src3, dst, 16); - dst += dst_stride; - ST_UB4(src4, src5, src6, src7, dst, 16); - dst += dst_stride; - ST_UB4(src8, src9, src10, src11, dst, 16); - dst += dst_stride; - ST_UB4(src12, src13, src14, src15, dst, 16); - dst += dst_stride; - } -} - -static void avg_width4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - int32_t height) -{ - uint32_t tp0, tp1, tp2, tp3; - v16u8 src0 = { 0 }, src1 = { 0 }, dst0 = { 0 }, dst1 = { 0 }; - - if (8 == height) { - LW4(src, src_stride, tp0, tp1, tp2, tp3); - src += 4 * src_stride; - INSERT_W4_UB(tp0, tp1, tp2, tp3, src0); - LW4(src, src_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, src1); - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - LW4(dst + 4 * dst_stride, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1); - AVER_UB2_UB(src0, dst0, src1, dst1, dst0, dst1); - ST_W8(dst0, dst1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride); - } else if (4 == height) { - LW4(src, src_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, src0); - LW4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0); - dst0 = __msa_aver_u_b(src0, dst0); - ST_W4(dst0, 0, 1, 2, 3, dst, dst_stride); - } -} - -static void avg_width8_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - int32_t height) -{ - int32_t cnt; - uint64_t tp0, tp1, tp2, tp3, tp4, tp5, tp6, tp7; - v16u8 src0, src1, src2, src3; - v16u8 dst0, dst1, dst2, dst3; - - if (0 == (height % 8)) { - for (cnt = (height >> 3); cnt--;) { - LD4(src, src_stride, tp0, tp1, tp2, tp3); - src += 4 * src_stride; - LD4(src, src_stride, tp4, tp5, tp6, tp7); - src += 4 * src_stride; - INSERT_D2_UB(tp0, tp1, src0); - INSERT_D2_UB(tp2, tp3, src1); - INSERT_D2_UB(tp4, tp5, src2); - INSERT_D2_UB(tp6, tp7, src3); - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - LD4(dst + 4 * dst_stride, dst_stride, tp4, tp5, tp6, tp7); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - INSERT_D2_UB(tp4, tp5, dst2); - INSERT_D2_UB(tp6, tp7, dst3); - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, - dst1, dst2, dst3); - ST_D8(dst0, dst1, dst2, dst3, 0, 1, 0, 1, 0, 1, 0, 1, dst, dst_stride); - dst += 8 * dst_stride; - } - } else if (4 == height) { - LD4(src, src_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, src0); - INSERT_D2_UB(tp2, tp3, src1); - LD4(dst, dst_stride, tp0, tp1, tp2, tp3); - INSERT_D2_UB(tp0, tp1, dst0); - INSERT_D2_UB(tp2, tp3, dst1); - AVER_UB2_UB(src0, dst0, src1, dst1, dst0, dst1); - ST_D4(dst0, dst1, 0, 1, 0, 1, dst, dst_stride); - } -} - -static void avg_width16_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - int32_t height) -{ - int32_t cnt; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7; - v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - - if (0 == (height % 8)) { - for (cnt = 
(height / 8); cnt--;) { - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); - - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); - AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, - dst4, dst5, dst6, dst7); - ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst, dst_stride); - dst += (8 * dst_stride); - } - } else if (0 == (height % 4)) { - for (cnt = (height / 4); cnt--;) { - LD_UB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); - ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride); - dst += (4 * dst_stride); - } - } -} - -static void avg_width32_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - int32_t height) -{ - int32_t cnt; - uint8_t *dst_dup = dst; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7; - v16u8 src8, src9, src10, src11, src12, src13, src14, src15; - v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15; - - if (0 == (height % 8)) { - for (cnt = (height / 8); cnt--;) { - LD_UB4(src, src_stride, src0, src2, src4, src6); - LD_UB4(src + 16, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); - LD_UB4(dst_dup, dst_stride, dst0, dst2, dst4, dst6); - LD_UB4(dst_dup + 16, dst_stride, dst1, dst3, dst5, dst7); - dst_dup += (4 * dst_stride); - LD_UB4(src, src_stride, src8, src10, src12, src14); - LD_UB4(src + 16, src_stride, src9, src11, src13, src15); - src += (4 * src_stride); - LD_UB4(dst_dup, dst_stride, dst8, dst10, dst12, dst14); - LD_UB4(dst_dup + 16, dst_stride, dst9, dst11, dst13, dst15); - dst_dup += (4 * dst_stride); - - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); - AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, - dst4, dst5, dst6, dst7); - AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, - dst8, dst9, dst10, dst11); - AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15, - dst12, dst13, dst14, dst15); - - ST_UB4(dst0, dst2, dst4, dst6, dst, dst_stride); - ST_UB4(dst1, dst3, dst5, dst7, dst + 16, dst_stride); - dst += (4 * dst_stride); - ST_UB4(dst8, dst10, dst12, dst14, dst, dst_stride); - ST_UB4(dst9, dst11, dst13, dst15, dst + 16, dst_stride); - dst += (4 * dst_stride); - } - } else if (0 == (height % 4)) { - for (cnt = (height / 4); cnt--;) { - LD_UB4(src, src_stride, src0, src2, src4, src6); - LD_UB4(src + 16, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); - LD_UB4(dst_dup, dst_stride, dst0, dst2, dst4, dst6); - LD_UB4(dst_dup + 16, dst_stride, dst1, dst3, dst5, dst7); - dst_dup += (4 * dst_stride); - - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); - AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, - dst4, dst5, dst6, dst7); - - ST_UB4(dst0, dst2, dst4, dst6, dst, dst_stride); - ST_UB4(dst1, dst3, dst5, dst7, dst + 16, dst_stride); - dst += (4 * dst_stride); - } - } -} - -static void avg_width64_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, - int32_t height) -{ - int32_t cnt; - uint8_t *dst_dup = dst; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7; - v16u8 src8, src9, src10, src11, src12, src13, src14, src15; - v16u8 dst0, dst1, 
dst2, dst3, dst4, dst5, dst6, dst7; - v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15; - - for (cnt = (height / 4); cnt--;) { - LD_UB4(src, 16, src0, src1, src2, src3); - src += src_stride; - LD_UB4(src, 16, src4, src5, src6, src7); - src += src_stride; - LD_UB4(src, 16, src8, src9, src10, src11); - src += src_stride; - LD_UB4(src, 16, src12, src13, src14, src15); - src += src_stride; - - LD_UB4(dst_dup, 16, dst0, dst1, dst2, dst3); - dst_dup += dst_stride; - LD_UB4(dst_dup, 16, dst4, dst5, dst6, dst7); - dst_dup += dst_stride; - LD_UB4(dst_dup, 16, dst8, dst9, dst10, dst11); - dst_dup += dst_stride; - LD_UB4(dst_dup, 16, dst12, dst13, dst14, dst15); - dst_dup += dst_stride; - - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); - AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, - dst4, dst5, dst6, dst7); - AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, - dst8, dst9, dst10, dst11); - AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15, - dst12, dst13, dst14, dst15); - - ST_UB4(dst0, dst1, dst2, dst3, dst, 16); - dst += dst_stride; - ST_UB4(dst4, dst5, dst6, dst7, dst, 16); - dst += dst_stride; - ST_UB4(dst8, dst9, dst10, dst11, dst, 16); - dst += dst_stride; - ST_UB4(dst12, dst13, dst14, dst15, dst, 16); - dst += dst_stride; - } -} - -static const int8_t vp9_subpel_filters_msa[3][15][8] = { - [FILTER_8TAP_REGULAR] = { - {0, 1, -5, 126, 8, -3, 1, 0}, - {-1, 3, -10, 122, 18, -6, 2, 0}, - {-1, 4, -13, 118, 27, -9, 3, -1}, - {-1, 4, -16, 112, 37, -11, 4, -1}, - {-1, 5, -18, 105, 48, -14, 4, -1}, - {-1, 5, -19, 97, 58, -16, 5, -1}, - {-1, 6, -19, 88, 68, -18, 5, -1}, - {-1, 6, -19, 78, 78, -19, 6, -1}, - {-1, 5, -18, 68, 88, -19, 6, -1}, - {-1, 5, -16, 58, 97, -19, 5, -1}, - {-1, 4, -14, 48, 105, -18, 5, -1}, - {-1, 4, -11, 37, 112, -16, 4, -1}, - {-1, 3, -9, 27, 118, -13, 4, -1}, - {0, 2, -6, 18, 122, -10, 3, -1}, - {0, 1, -3, 8, 126, -5, 1, 0}, - }, [FILTER_8TAP_SHARP] = { - {-1, 3, -7, 127, 8, -3, 1, 0}, - {-2, 5, -13, 125, 17, -6, 3, -1}, - {-3, 7, -17, 121, 27, -10, 5, -2}, - {-4, 9, -20, 115, 37, -13, 6, -2}, - {-4, 10, -23, 108, 48, -16, 8, -3}, - {-4, 10, -24, 100, 59, -19, 9, -3}, - {-4, 11, -24, 90, 70, -21, 10, -4}, - {-4, 11, -23, 80, 80, -23, 11, -4}, - {-4, 10, -21, 70, 90, -24, 11, -4}, - {-3, 9, -19, 59, 100, -24, 10, -4}, - {-3, 8, -16, 48, 108, -23, 10, -4}, - {-2, 6, -13, 37, 115, -20, 9, -4}, - {-2, 5, -10, 27, 121, -17, 7, -3}, - {-1, 3, -6, 17, 125, -13, 5, -2}, - {0, 1, -3, 8, 127, -7, 3, -1}, - }, [FILTER_8TAP_SMOOTH] = { - {-3, -1, 32, 64, 38, 1, -3, 0}, - {-2, -2, 29, 63, 41, 2, -3, 0}, - {-2, -2, 26, 63, 43, 4, -4, 0}, - {-2, -3, 24, 62, 46, 5, -4, 0}, - {-2, -3, 21, 60, 49, 7, -4, 0}, - {-1, -4, 18, 59, 51, 9, -4, 0}, - {-1, -4, 16, 57, 53, 12, -4, -1}, - {-1, -4, 14, 55, 55, 14, -4, -1}, - {-1, -4, 12, 53, 57, 16, -4, -1}, - {0, -4, 9, 51, 59, 18, -4, -1}, - {0, -4, 7, 49, 60, 21, -3, -2}, - {0, -4, 5, 46, 62, 24, -3, -2}, - {0, -4, 4, 43, 63, 26, -2, -2}, - {0, -3, 2, 41, 63, 29, -2, -2}, - {0, -3, 1, 38, 64, 32, -1, -3}, - } -}; - -#define VP9_8TAP_MIPS_MSA_FUNC(SIZE, type, type_idx) \ -void ff_put_8tap_##type##_##SIZE##h_msa(uint8_t *dst, ptrdiff_t dststride, \ - const uint8_t *src, \ - ptrdiff_t srcstride, \ - int h, int mx, int my) \ -{ \ - const int8_t *filter = vp9_subpel_filters_msa[type_idx][mx-1]; \ - \ - common_hz_8t_##SIZE##w_msa(src, srcstride, dst, dststride, filter, h); \ -} \ - \ -void ff_put_8tap_##type##_##SIZE##v_msa(uint8_t *dst, ptrdiff_t dststride, \ - 
const uint8_t *src, \ - ptrdiff_t srcstride, \ - int h, int mx, int my) \ -{ \ - const int8_t *filter = vp9_subpel_filters_msa[type_idx][my-1]; \ - \ - common_vt_8t_##SIZE##w_msa(src, srcstride, dst, dststride, filter, h); \ -} \ - \ -void ff_put_8tap_##type##_##SIZE##hv_msa(uint8_t *dst, ptrdiff_t dststride, \ - const uint8_t *src, \ - ptrdiff_t srcstride, \ - int h, int mx, int my) \ -{ \ - const int8_t *hfilter = vp9_subpel_filters_msa[type_idx][mx-1]; \ - const int8_t *vfilter = vp9_subpel_filters_msa[type_idx][my-1]; \ - \ - common_hv_8ht_8vt_##SIZE##w_msa(src, srcstride, dst, dststride, hfilter, \ - vfilter, h); \ -} \ - \ -void ff_avg_8tap_##type##_##SIZE##h_msa(uint8_t *dst, ptrdiff_t dststride, \ - const uint8_t *src, \ - ptrdiff_t srcstride, \ - int h, int mx, int my) \ -{ \ - const int8_t *filter = vp9_subpel_filters_msa[type_idx][mx-1]; \ - \ - common_hz_8t_and_aver_dst_##SIZE##w_msa(src, srcstride, dst, \ - dststride, filter, h); \ -} \ - \ -void ff_avg_8tap_##type##_##SIZE##v_msa(uint8_t *dst, ptrdiff_t dststride, \ - const uint8_t *src, \ - ptrdiff_t srcstride, \ - int h, int mx, int my) \ -{ \ - const int8_t *filter = vp9_subpel_filters_msa[type_idx][my-1]; \ - \ - common_vt_8t_and_aver_dst_##SIZE##w_msa(src, srcstride, dst, dststride, \ - filter, h); \ -} \ - \ -void ff_avg_8tap_##type##_##SIZE##hv_msa(uint8_t *dst, ptrdiff_t dststride, \ - const uint8_t *src, \ - ptrdiff_t srcstride, \ - int h, int mx, int my) \ -{ \ - const int8_t *hfilter = vp9_subpel_filters_msa[type_idx][mx-1]; \ - const int8_t *vfilter = vp9_subpel_filters_msa[type_idx][my-1]; \ - \ - common_hv_8ht_8vt_and_aver_dst_##SIZE##w_msa(src, srcstride, dst, \ - dststride, hfilter, \ - vfilter, h); \ -} - -#define VP9_COPY_AVG_MIPS_MSA_FUNC(SIZE) \ -void ff_copy##SIZE##_msa(uint8_t *dst, ptrdiff_t dststride, \ - const uint8_t *src, ptrdiff_t srcstride, \ - int h, int mx, int my) \ -{ \ - \ - copy_width##SIZE##_msa(src, srcstride, dst, dststride, h); \ -} \ - \ -void ff_avg##SIZE##_msa(uint8_t *dst, ptrdiff_t dststride, \ - const uint8_t *src, ptrdiff_t srcstride, \ - int h, int mx, int my) \ -{ \ - \ - avg_width##SIZE##_msa(src, srcstride, dst, dststride, h); \ -} - -#define VP9_AVG_MIPS_MSA_FUNC(SIZE) \ -void ff_avg##SIZE##_msa(uint8_t *dst, ptrdiff_t dststride, \ - const uint8_t *src, ptrdiff_t srcstride, \ - int h, int mx, int my) \ -{ \ - \ - avg_width##SIZE##_msa(src, srcstride, dst, dststride, h); \ -} - -VP9_8TAP_MIPS_MSA_FUNC(64, regular, FILTER_8TAP_REGULAR); -VP9_8TAP_MIPS_MSA_FUNC(32, regular, FILTER_8TAP_REGULAR); -VP9_8TAP_MIPS_MSA_FUNC(16, regular, FILTER_8TAP_REGULAR); -VP9_8TAP_MIPS_MSA_FUNC(8, regular, FILTER_8TAP_REGULAR); -VP9_8TAP_MIPS_MSA_FUNC(4, regular, FILTER_8TAP_REGULAR); - -VP9_8TAP_MIPS_MSA_FUNC(64, sharp, FILTER_8TAP_SHARP); -VP9_8TAP_MIPS_MSA_FUNC(32, sharp, FILTER_8TAP_SHARP); -VP9_8TAP_MIPS_MSA_FUNC(16, sharp, FILTER_8TAP_SHARP); -VP9_8TAP_MIPS_MSA_FUNC(8, sharp, FILTER_8TAP_SHARP); -VP9_8TAP_MIPS_MSA_FUNC(4, sharp, FILTER_8TAP_SHARP); - -VP9_8TAP_MIPS_MSA_FUNC(64, smooth, FILTER_8TAP_SMOOTH); -VP9_8TAP_MIPS_MSA_FUNC(32, smooth, FILTER_8TAP_SMOOTH); -VP9_8TAP_MIPS_MSA_FUNC(16, smooth, FILTER_8TAP_SMOOTH); -VP9_8TAP_MIPS_MSA_FUNC(8, smooth, FILTER_8TAP_SMOOTH); -VP9_8TAP_MIPS_MSA_FUNC(4, smooth, FILTER_8TAP_SMOOTH); - -VP9_COPY_AVG_MIPS_MSA_FUNC(64); -VP9_COPY_AVG_MIPS_MSA_FUNC(32); -VP9_COPY_AVG_MIPS_MSA_FUNC(16); -VP9_COPY_AVG_MIPS_MSA_FUNC(8); -VP9_AVG_MIPS_MSA_FUNC(4); - -#undef VP9_8TAP_MIPS_MSA_FUNC -#undef VP9_COPY_AVG_MIPS_MSA_FUNC -#undef VP9_AVG_MIPS_MSA_FUNC diff 
--git a/spaces/congsaPfin/Manga-OCR/logs/A.R. Rahman Telugu Songs Enjoy the Melodies of the Legendary Composer.md b/spaces/congsaPfin/Manga-OCR/logs/A.R. Rahman Telugu Songs Enjoy the Melodies of the Legendary Composer.md deleted file mode 100644 index 19cd6ae2a94caa1b414fe022ab284eb043b251fd..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/A.R. Rahman Telugu Songs Enjoy the Melodies of the Legendary Composer.md +++ /dev/null @@ -1,151 +0,0 @@ - -

      How to Download AR Rahman Hit Songs in Telugu

      -

      If you are a fan of Indian music, you must have heard of AR Rahman, the legendary composer, singer, and musician who has won numerous awards and accolades for his work. He is widely regarded as one of the most influential and innovative artists in the world and has created music across many genres, languages, and cultures. He is especially known for his hit songs in Telugu, one of the major languages spoken in India.

      -

      ar rahman hit songs telugu download


      DOWNLOAD: https://urlca.com/2uO9Cs



      -

      In this article, we will tell you how to download AR Rahman hit songs in Telugu, what some of the benefits of listening to his songs are, and why he is considered the Mozart of Madras.

      -

      Who is AR Rahman and why is he famous?

      -

      AR Rahman, whose full name is Allah Rakha Rahman, was born as AS Dileep Kumar on January 6, 1967, in Chennai, Tamil Nadu. He started learning the piano at the age of four and assisted his father, who was a music composer for Malayalam films. After his father's death, he dropped out of school and started working as a professional keyboardist to support his family. He later received a scholarship to study Western classical music at Trinity College of Music, London.

      -

      In 1988, he converted to Islam after his sister's recovery from a serious illness. He changed his name to Allah Rakha Rahman and began composing music for advertisements and documentaries. In 1991, he met film director Mani Ratnam, who asked him to compose the music for his film Roja. The film's soundtrack became a huge success and won him his first National Film Award for Best Music Direction.

      -

      Since then, he has composed music for over 100 films in various languages, including Tamil, Hindi, Telugu, Malayalam, English, Arabic, Chinese, and Japanese. He has also composed music for stage shows, albums, television shows, video games, and documentaries. He has won six National Film Awards, two Academy Awards, two Grammy Awards, a BAFTA Award, a Golden Globe Award, fifteen Filmfare Awards, seventeen Filmfare Awards South, and many other honors. He is also a recipient of the Padma Shri and Padma Bhushan awards from the Indian government.

      -

      Some of his most famous works include Bombay (1995), Dil Se (1998), Taal (1999), Lagaan (2001), Rang De Basanti (2006), Slumdog Millionaire (2008), Delhi-6 (2009), Rockstar (2011), Highway (2014), OK Kanmani (2015), Mersal (2017), 99 Songs (2021), etc.

      -

      What are some of his hit songs in Telugu?

      -

      AR Rahman has composed music for many Telugu films and albums over the years. Some of his hit songs in Telugu are:

      -
      • Poovullo Daagunna from Jeans (1998)
      • Pedave Palikina from Nani (2004)
      • Munbe Vaa from Sillunu Oru Kaadhal (2006)
      • Nuvvu Nenu Prema from Nuvvu Nenu Prema (2006)
      • Jai Ho from Slumdog Millionaire (2008)
      • Nenjae Yezhu from Maryan (2013)
      • Mental Manadhil from OK Bangaram (2015)
      • Cheliyaa from Cheliyaa (2017)
      • Naan Varuven from Raavan (2010)
      • Yeh Haseen Vadiyan from Roja (1992)
      • Chinna Chinna Aasai from Roja (1992)
      • Sakhiya from Sakhi (2000)
      • Yedho Ondru from Paiyaa (2010)
      • Vellipomaakey from Saahasam Swaasaga Saagipo (2016)
      -

      How to download AR Rahman hit songs in Telugu?

      -

      If you want to download AR Rahman hit songs in Telugu, you have many options to choose from. There are many websites and apps that offer legal and safe downloads of his songs. However, you should be careful of some sites that may contain viruses, malware, or pirated content. Here are some of the best sites to download his songs in Telugu:

      -


      -

      Gaana

      -

      Gaana is one of the most popular music streaming and downloading platforms in India. It has a huge collection of AR Rahman songs in various languages, including Telugu. You can download his songs for free or subscribe to Gaana Plus for unlimited downloads and offline listening. Here are the steps to download his songs from Gaana:

      -
        -
1. Go to Gaana.com or download the Gaana app on your device.
2. Search for AR Rahman or his songs in the search bar.
3. Select the song you want to download and click on the download icon.
4. If you are a Gaana Plus subscriber, you can download the song directly. If not, you can sign up for a free trial or pay for a subscription plan.
5. Enjoy listening to his songs offline.
      -

      JioSaavn

      -

      JioSaavn is another popular music streaming and downloading platform in India. It has a large collection of AR Rahman songs in various languages, including Telugu. You can download his songs for free or subscribe to JioSaavn Pro for unlimited downloads and offline listening. Here are the steps to download his songs from JioSaavn:

      -
        -
1. Go to JioSaavn.com or download the JioSaavn app on your device.
2. Search for AR Rahman or his songs in the search bar.
3. Select the song you want to download and click on the download icon.
4. If you are a JioSaavn Pro subscriber, you can download the song directly. If not, you can sign up for a free trial or pay for a subscription plan.
5. Enjoy listening to his songs offline.
      -

      Hungama Music

      -

      Hungama Music is another popular music streaming and downloading platform in India. It has a good collection of AR Rahman songs in various languages, including Telugu. You can download his songs for free or subscribe to Hungama Music Pro for unlimited downloads and offline listening. Here are the steps to download his songs from Hungama Music:

      -
        -
1. Go to Hungama.com or download the Hungama Music app on your device.
2. Search for AR Rahman or his songs in the search bar.
3. Select the song you want to download and click on the download icon.
4. If you are a Hungama Music Pro subscriber, you can download the song directly. If not, you can sign up for a free trial or pay for a subscription plan.
5. Enjoy listening to his songs offline.
      -

      Tips and tricks to enhance the downloading experience

      -

      Here are some tips and tricks to enhance your downloading experience of AR Rahman hit songs in Telugu:

      -
        -
• Make sure you have a stable and fast internet connection before downloading.
• Check the file size and format of the song before downloading. Choose the one that suits your device and storage space.
• Create playlists of your favorite AR Rahman songs and download them in bulk.
• Delete any unwanted or duplicate files from your device to free up space and avoid confusion.
• Rename and organize your downloaded files according to your preference.
      -

      What are some of the benefits of listening to AR Rahman hit songs in Telugu?

      -

      Listening to AR Rahman hit songs in Telugu has many benefits for your mind, body, and soul. Here are some of them:

      Improving your mood and mental health

      -

      Music has a powerful effect on your mood and mental health. It can make you feel happy, sad, calm, excited, or anything in between. Listening to AR Rahman hit songs in Telugu can help you improve your mood and mental health by:

      -
        -
• Reducing stress and anxiety. His songs have soothing melodies, harmonies, and rhythms that can relax your mind and body.
• Boosting your self-esteem and confidence. His songs have inspiring lyrics, messages, and themes that can motivate you to achieve your goals and dreams.
• Enhancing your creativity and imagination. His songs have innovative and original musical elements that can stimulate your brain and spark your imagination.
• Healing your emotions and feelings. His songs have expressive and emotional vocals and instruments that can resonate with your feelings and help you cope with them.
      -

      Learning the Telugu language and culture

      -

      Music is a great way to learn a new language and culture. It can help you improve your vocabulary, grammar, pronunciation, listening, and speaking skills. Listening to AR Rahman hit songs in Telugu can help you learn the Telugu language and culture by:

      -
        -
• Exposing you to a rich and diverse vocabulary. His songs have words from various fields, such as nature, love, spirituality, philosophy, etc.
• Teaching you the grammar and syntax of the language. His songs have sentences that follow the rules and structures of the language.
• Helping you with the pronunciation and accent of the language. His songs have clear and crisp vocals that can help you with the sounds and tones of the language.
• Familiarizing you with the culture and traditions of the Telugu people. His songs have references to the history, geography, literature, art, festivals, customs, etc. of the Telugu people.
      -

      Appreciating the music and talent of AR Rahman

      -

      Music is an art form that requires skill, talent, passion, and dedication. It can help you appreciate the beauty and complexity of music and the talent of the musicians. Listening to AR Rahman hit songs in Telugu can help you appreciate the music and talent of AR Rahman by:

      -
        -
• Admiring his musical genius and versatility. His songs have a variety of musical genres, styles, influences, techniques, instruments, etc.
• Acknowledging his musical achievements and awards. His songs have won him many national and international awards and recognition.
• Supporting his musical vision and mission. His songs have a positive impact on society and humanity.
• Loving his musical personality and charisma. His songs have a personal touch and charm that can connect with his listeners.
      -

      Conclusion

      -

      In conclusion, AR Rahman is one of the most celebrated and respected musicians in the world. He has composed many hit songs in Telugu that are loved by millions of people. You can download his songs from various websites and apps that offer legal and safe downloads. You can also enjoy many benefits from listening to his songs, such as improving your mood, learning the language, appreciating the culture, and admiring his talent. So what are you waiting for? Download his songs today and enjoy the magic of his music!

      -

      Frequently Asked Questions

      -

      Here are some frequently asked questions about AR Rahman hit songs in Telugu:

      -

      Q: What is AR Rahman's real name?

      -

      A: AR Rahman's real name is Allah Rakha Rahman. He was born as AS Dileep Kumar on January 6, 1967.

      -

      Q: How many languages does AR Rahman compose music in?

      -

      A: AR Rahman composes music in various languages, including Tamil, Hindi, Telugu, Malayalam, English, Arabic, Chinese, Japanese, etc.

      -

      Q: Which film gave AR Rahman his first Oscar?

      -

      A: AR Rahman won his first Oscar for his music in Slumdog Millionaire (2008), directed by Danny Boyle.

      -

      Q: Which song made AR Rahman famous in Telugu?

      -

      A: One of the songs that made AR Rahman famous in Telugu was Yeh Haseen Vadiyan from Roja (1992), directed by Mani Ratnam.

      -

      Q: Where can I find more information about AR Rahman?

      -

      A: You can find more information about AR Rahman on his official website www.arrahman.com, his social media accounts Facebook, Twitter, Instagram, and his YouTube channel AR Rahman.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Boost Your Brain and Your IQ with Countries Flags Game - Download for Free.md b/spaces/congsaPfin/Manga-OCR/logs/Boost Your Brain and Your IQ with Countries Flags Game - Download for Free.md deleted file mode 100644 index a126629e3bdf1cd8ba3bceb43594e07126b3f845..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Boost Your Brain and Your IQ with Countries Flags Game - Download for Free.md +++ /dev/null @@ -1,101 +0,0 @@ - -

      Countries Flags Game Download: A Fun and Educational Way to Learn About the World

      -

      Do you love trivia games? Do you want to improve your geography skills? Do you enjoy learning about different cultures and histories? If you answered yes to any of these questions, then you might be interested in countries flags games. These are games that involve identifying, matching, or solving puzzles with flags and maps of various countries and regions around the world. They are not only entertaining, but also useful for enhancing your knowledge and awareness of the world.

      -

      Types of Countries Flags Games

      -

      There are many types of countries flags games available to download for free on different platforms and devices. Here are some of the most common ones:

      -




      -

      Flag Quiz Games

      -

      These are games that test your knowledge of flags and maps with multiple choice questions, hints, and challenges. You can choose from different levels of difficulty, categories, and modes. You can also compete with your friends or other players online and see who has the best score. Some examples of flag quiz games are Flags 2: Multiplayer and Flags Quiz!.

      -

      Flag Matching Games

      -

      These are games that require you to match the flags with the countries or regions they belong to. You can either drag and drop the flags to their corresponding places on a map or select them from a list. You can also learn more details about each country or region, such as their capital city, population, area, or currency. Some examples of flag matching games are World Flags Quiz and Flag Master.

      -

      Flag Puzzle Games

      -

      These are games that challenge you to solve puzzles by arranging the pieces of flags or maps. You can either rotate, swap, or slide the pieces until they form a complete image. You can also adjust the number and shape of the pieces according to your preference. Some examples of flag puzzle games are Flag Jigsaw Puzzles and World Map Puzzle.

      -

      Benefits of Countries Flags Games

      -

      Countries flags games are not only fun, but also beneficial for your brain and your culture. Here are some of the benefits they offer:

      -

      Improve your memory and cognitive skills

      -

      By playing countries flags games, you can improve your memory and cognitive skills by recognizing patterns, shapes, and colors. You can also enhance your spatial awareness and visual perception by locating the flags and maps on a globe. These skills are essential for learning, problem-solving, and creativity.

      -

      Enhance your cultural awareness and curiosity

      -

      By playing countries flags games, you can enhance your cultural awareness and curiosity by learning about different countries and regions. You can discover their history, culture, geography, politics, and economy. You can also appreciate their diversity and uniqueness. These games can inspire you to travel, explore, and connect with other people around the world.

      -

      Have fun and compete with your friends or other players online

      -

      By playing countries flags games, you can have fun and compete with your friends or other players online. You can challenge yourself to beat your own records or to rank higher on the leaderboards. You can also share your achievements and progress on social media or chat with other players. These games can make learning more enjoyable and rewarding.

      -

      -

      How to Download Countries Flags Games for Free

      -

      If you want to download countries flags games for free, you need to follow these steps:

      -

      Use a reliable and safe source such as Google Play Store, Microsoft Store, or Flagpedia.net

      -

      The first step is to use a reliable and safe source that offers free downloads of countries flags games. You can use the Google Play Store for Android devices, the Microsoft Store for Windows devices, or the Flagpedia.net website for any device. These sources have a wide selection of games that are verified and secure.

      -

      Choose a game that suits your preferences and device compatibility

      -

      The second step is to choose a game that suits your preferences and device compatibility. You can browse through the categories, ratings, reviews, screenshots, and descriptions of the games to find the one that interests you. You can also check the requirements, permissions, and updates of the games to make sure they are compatible with your device.

      -

      Follow the instructions to install and launch the game

      -

      The third step is to follow the instructions to install and launch the game. You can either click on the download button or scan the QR code to start the download process. You can then follow the prompts to accept the terms and conditions, grant the permissions, and complete the installation. You can then open the game and start playing.

      -

      Examples of Countries Flags Games to Download

      -

      Here are some examples of countries flags games that you can download for free:

      -

      Flags 2: Multiplayer - A multiplayer flag quiz game that improves your brain and challenges your IQ

      -

      This is a multiplayer flag quiz game that allows you to play with up to four players online or offline. You can choose from over 200 flags and 20 maps from all continents. You can also customize your avatar, name, color, and language. This game improves your brain and challenges your IQ by testing your knowledge of flags and maps.

      -

      Flags Quiz! - A free game full of fun that consists on guessing the names of hundreds of countries flags from around the world

      -

      This is a free game full of fun that consists on guessing the names of hundreds of countries flags from around the world. You can choose from four different game modes: Classic, Time Attack, Hard Mode, and Custom Mode. You can also use hints, skip questions, or ask for help from your friends. This game is suitable for all ages and levels.

      -

      Download all country flags of the world for free - A single package or embed service that allows you to use country flags in your news magazines, websites, software, mobile apps and master's thesis

      -

      This is a single package or embed service that allows you to use country flags in your news magazines, websites, software, mobile apps and master's thesis. You can download all country flags of the world for free in various formats (PNG, SVG) and sizes (16x16 px to 2500x2500 px). You can also use an API or a widget to embed country flags in your projects.
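For readers who prefer to script this instead of downloading the full archive by hand, here is a minimal Python sketch that fetches a single flag image. The CDN host and URL pattern below (flagcdn.com with ISO 3166-1 alpha-2 country codes) are an assumption based on Flagpedia's public embed service, so check the download/API page on Flagpedia.net for the exact formats and sizes before relying on it.

```python
import requests  # third-party HTTP client: pip install requests

def download_flag(country_code: str, width: int = 320) -> str:
    """Save one country flag as a PNG file and return the local filename."""
    code = country_code.lower()
    # Assumed URL pattern for Flagpedia's CDN; verify against their documentation.
    url = f"https://flagcdn.com/w{width}/{code}.png"
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    filename = f"flag_{code}_w{width}.png"
    with open(filename, "wb") as f:
        f.write(response.content)
    return filename

if __name__ == "__main__":
    # Example: "in" is the ISO code for India; any valid two-letter code works.
    print(download_flag("in"))
```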

      -

      Conclusion

      -

      Countries flags games are a fun and educational way to learn about the world. They offer various types of games that test your knowledge of flags and maps, match the flags with the countries or regions they belong to, or solve puzzles by arranging the pieces of flags or maps. They also offer various benefits such as improving your memory and cognitive skills, enhancing your cultural awareness and curiosity, and having fun and competing with your friends or other players online. You can download countries flags games for free from reliable and safe sources such as Google Play Store, Microsoft Store, or Flagpedia.net. You can also choose a game that suits your preferences and device compatibility. You can also follow the instructions to install and launch the game. Here are some examples of countries flags games that you can download for free: Flags 2: Multiplayer, Flags Quiz!, and Download all country flags of the world for free.

      -

      FAQs

      -

      Here are some frequently asked questions about countries flags games:

      -

      What are the best countries flags games to download?

      -

      The best countries flags games to download depend on your personal preference, device compatibility, and source reliability. However, some of the most popular and highly rated ones are Flags 2: Multiplayer, Flags Quiz!, and Download all country flags of the world for free.

      -

      How can I learn more about the countries and regions that I see in the games?

      -

      You can learn more about the countries and regions that you see in the games by clicking on them or reading their details. You can also use other sources such as Wikipedia, Google Maps, or online encyclopedias to find more information.

      -

      How can I improve my score and rank in the games?

      -

      You can improve your score and rank in the games by playing more often, choosing harder levels or modes, using fewer hints or skips, and answering faster. You can also review your mistakes and learn from them.

      -

      Are countries flags games suitable for children?

      -

      Yes, countries flags games are suitable for children. They are fun, educational, and easy to play. They can help children develop their memory, cognitive, and cultural skills. They can also spark their interest and curiosity in the world.

      -

      Can I use countries flags in my own projects?

      -

      Yes, you can use countries flags in your own projects. You can download all country flags of the world for free from Flagpedia.net in various formats and sizes. You can also use an API or a widget to embed country flags in your projects. However, you should respect the intellectual property rights and licenses of the flag images and sources.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Dream League Soccer 2023 Mod Apk The Ultimate Guide to Unlock Everything - AN1.md b/spaces/congsaPfin/Manga-OCR/logs/Dream League Soccer 2023 Mod Apk The Ultimate Guide to Unlock Everything - AN1.md deleted file mode 100644 index 0654f27fa553e2a5f4054a9d8bdf32db191b9f8d..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Dream League Soccer 2023 Mod Apk The Ultimate Guide to Unlock Everything - AN1.md +++ /dev/null @@ -1,90 +0,0 @@ -
      -

      Dream League Soccer 2023 Mod APK: Unlimited Coins and Diamonds

      -

      If you are a fan of soccer games, you might have heard of Dream League Soccer 2023, one of the most anticipated football games at the end of this year. With dozens of major updates waiting for you to discover, this game promises to bring you an immersive and realistic soccer experience on your mobile device. But what if you want to enjoy the game without any limitations or restrictions? What if you want to have unlimited coins and diamonds to build your dream team and dominate the league? Well, that's where a mod apk comes in handy. In this article, we will tell you everything you need to know about Dream League Soccer 2023 mod apk, including its features, how to download and install it, and some frequently asked questions.

      -

      Introduction

      -

      What is Dream League Soccer 2023?

      -

      Dream League Soccer 2023, or DLS 23 for short, is a soccer simulation game developed by First Touch Games. It is the latest installment in the popular Dream League Soccer series, which has been downloaded over 500 million times on Google Play Store. In this game, you can create your own soccer club, recruit players from around the world, customize your stadium, kits, and logos, and compete in various leagues and tournaments. You can also play online with other players or challenge your friends in multiplayer mode. The game features stunning graphics, realistic animations, dynamic sound effects, and intuitive controls that will make you feel like you are on the pitch.

      -




      -

      What is a mod apk?

      -

      A mod apk is a modified version of an original application that has been altered by third-party developers to add or remove certain features. A mod apk can enhance the functionality, performance, or appearance of an app, or unlock some premium or paid features for free. For example, a mod apk for a game can give you unlimited resources, access to all levels or modes, or remove ads or in-app purchases.

      -

      Why use a mod apk for Dream League Soccer 2023?

      -

      While Dream League Soccer 2023 is a free-to-play game, it also has some in-game currency and items that require real money to purchase. These include coins and diamonds, which are used to buy players, upgrade your stadium, or unlock other features. However, earning coins and diamonds in the game can be time-consuming and tedious, especially if you want to have the best players and facilities. That's why some players prefer to use a mod apk for Dream League Soccer 2023, which can give them unlimited coins and diamonds without spending any money. This way, they can enjoy the game without any limitations or frustrations.

      -

      Features of Dream League Soccer 2023 Mod APK

      -

      Unlimited coins and diamonds

      -

      The main feature of Dream League Soccer 2023 mod apk is that it gives you unlimited coins and diamonds. Coins are the basic currency in the game, which are used to buy players from the transfer market or improve your stadium. Diamonds are the premium currency in the game, which are used to buy special players or items from the store. With unlimited coins and diamonds, you can build your dream team with ease and have access to all the features in the game.

      -

      Mega menu

      -

      Another feature of Dream League Soccer 2023 mod apk is that it has a mega menu that allows you to customize various aspects of the game. For example, you can change the difficulty level, enable or disable cheats, adjust the camera angle, or modify the player attributes. You can also activate some cheats, such as unlimited stamina, no offside, or no fouls. The mega menu gives you more control and flexibility over the game.

      -

      Unlocked logos, kits, and coaches

      -

      Another feature of Dream League Soccer 2023 mod apk is that it unlocks all the logos, kits, and coaches in the game. Logos and kits are the symbols and uniforms of your club, which you can customize to your liking. Coaches are the staff members who help you improve your team's performance and skills. With the mod apk, you can choose from hundreds of logos and kits from different clubs and countries, or create your own. You can also hire any coach you want, regardless of their level or price.

      -

      How to download and install Dream League Soccer 2023 Mod APK

      -

      Step 1: Download the mod apk file from a trusted source

      -

      The first step to use Dream League Soccer 2023 mod apk is to download the mod apk file from a trusted source. There are many websites that offer mod apk files for various games and apps, but not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should be careful and do some research before downloading any mod apk file. You can check the reviews, ratings, comments, or feedback from other users to see if the website is trustworthy or not. You can also use an antivirus or anti-malware software to scan the file before installing it.

      -

      -

      Step 2: Enable unknown sources on your device

      -

      The second step to use Dream League Soccer 2023 mod apk is to enable unknown sources on your device. Unknown sources are the settings that allow you to install applications from sources other than the official Google Play Store. By default, unknown sources are disabled on most Android devices for security reasons. However, since mod apk files are not available on the Google Play Store, you need to enable unknown sources to install them. To do this, you need to go to your device's settings, then security or privacy, then find and toggle on the option that says unknown sources or allow installation from unknown sources.

      -

      Step 3: Install the mod apk file and launch the game

      -

      The third and final step to use Dream League Soccer 2023 mod apk is to install the mod apk file and launch the game. To install the mod apk file, you need to locate it in your device's storage, then tap on it and follow the instructions on the screen. It may take a few seconds or minutes depending on the size of the file and your device's performance. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You should see a confirmation message that says "Dream League Soccer 2023 Mod APK Installed Successfully". Now you can enjoy the game with unlimited coins and diamonds and other features.

      -

      Conclusion

      -

      Dream League Soccer 2023 is a fantastic soccer game that lets you create your own club and compete in various leagues and tournaments. However, if you want to have more fun and freedom in the game, you can use a mod apk that gives you unlimited coins and diamonds and other features. In this article, we have explained what a mod apk is, why you should use it for Dream League Soccer 2023, what features it offers, and how to download and install it on your device. We hope this article was helpful and informative for you. If you have any questions or suggestions, feel free to leave them in the comments section below.

      -

      FAQs

      -

      Here are some frequently asked questions about Dream League Soccer 2023 mod apk:

      -

      Q: Is Dream League Soccer 2023 mod apk safe to use?

      -

      A: Yes, as long as you download it from a trusted source and scan it with an antivirus or anti-malware software before installing it. However, you should be aware that using a mod apk may violate the terms of service of the game or Google Play Store, and may result in your account being banned or suspended. Therefore, you should use it at your own risk and discretion.

      -

      Q: Do I need to root my device to use Dream League Soccer 2023 mod apk?

      -

      A: No, you do not need to root your device to use Dream League Soccer 2023 mod apk. Rooting is a process that gives you full access and control over your device's system settings and files, which may be required for some mod apks. However, Dream League Soccer 2023 mod apk does not require rooting, so you can use it without any problems.

      -

      Q: Can I update Dream League Soccer 2023 mod apk?

      -

      A: Yes, you can update Dream League Soccer 2023 mod apk whenever there is a new version available. However, you should not update it from the Google Play Store, as this may overwrite the mod apk and remove its features. Instead, you should download the latest version of the mod apk from the same source you downloaded it before, and install it over the existing one. This way, you can keep the mod apk and its features intact.

      -

      Q: Can I play online with Dream League Soccer 2023 mod apk?

      -

      A: Yes, you can play online with Dream League Soccer 2023 mod apk, but only with other players who are using the same mod apk. If you try to play online with players who are using the original version of the game, you may encounter some errors or compatibility issues. Therefore, it is recommended that you play online with your friends who are also using the mod apk, or join a community of mod apk users.

      -

      Q: Can I use Dream League Soccer 2023 mod apk on iOS devices?

      -

      A: No, you cannot use Dream League Soccer 2023 mod apk on iOS devices, as it is only compatible with Android devices. iOS devices have a different operating system and file format than Android devices, which makes it impossible to install or run mod apks on them. If you want to use a mod apk on iOS devices, you need to find a similar app that is designed for iOS devices, or use an emulator that can simulate an Android device on your iOS device.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Get I am a Rider Song in High Quality MP3 320kbps Format.md b/spaces/congsaPfin/Manga-OCR/logs/How to Get I am a Rider Song in High Quality MP3 320kbps Format.md deleted file mode 100644 index 8d516e7da439551078e6b02e9a093cd8ba53ae4e..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Get I am a Rider Song in High Quality MP3 320kbps Format.md +++ /dev/null @@ -1,103 +0,0 @@ - -

      How to Download "I am a Rider" Song in MP3 320kbps

      -

      "I am a Rider" is a catchy and energetic song that has become a viral sensation on social media platforms like TikTok, Instagram, and YouTube. If you are a fan of this song and want to download it in high-quality MP3 format, you are in the right place. In this article, we will show you what "I am a Rider" song is, why you should download it in MP3 320kbps, and how to do it easily and safely.

      -




      -

      What is "I am a Rider" Song?

      -

      "I am a Rider" is a song by Pakistani-British rapper Imran Khan, also known as IK. The song was originally released in 2013 as part of his second album, Satisfya. The song is also known as "Satisfya" or "Gaddi Lamborghini", as these are some of the prominent words in the chorus.

      -

      The origin and popularity of the song

      -

      Imran Khan is a popular rapper and singer who rose to fame with his debut album, Unforgettable, in 2009. He is known for his unique style of blending Punjabi and English lyrics with urban beats. His songs have been featured in Bollywood movies and have won several awards.

      -

      "I am a Rider" is one of his most successful songs, as it has over 500 million views on YouTube and over 100 million streams on Spotify. The song has also become a viral hit on social media platforms like TikTok, Instagram, and YouTube, where users create videos using the song as a background music. Some of the popular themes of these videos are car racing, bike stunts, action scenes, and dance moves.

      -

      The meaning and lyrics of the song

      -

      "I am a Rider" is a song that expresses Imran Khan's confidence and attitude as a rapper and a performer. He claims that he is a rider, a provider, and a fire-bringer who can satisfy his fans with his music. He also boasts about his wealth, fame, and skills as he compares himself to famous figures like Harry Houdini, Mr. Makaveli (Tupac Shakur), and diamond chain wearer.

      -

      The song has a catchy chorus that goes like this:

      -
      -

      I'm a rider
      -Provider
      -Bring the heat girl
      -I will bring the fire
      -And my name keeps
      -Scoring worldwider
      -So my job is to satisfy yaaa (aaa, ay, satisfy aaa, ai)

      -
      -

      You can find the full lyrics of the song here.

      -

      Why Download "I am a Rider" Song in MP3 320kbps?

      -

      If you love listening to "I am a Rider" song, you might want to download it in MP3 320kbps format. Why? Because MP3 320kbps is the best quality option for MP3 files that offers clear sound, rich bass, and minimal distortion.

      -

      -

      The benefits of MP3 320kbps format

      -

MP3 is one of the most popular audio formats that compresses sound data to reduce file size without losing much quality. However, not all MP3 files are created equal. The quality of an MP3 file depends on its bit rate, which is the number of bits per second that are used to encode the sound. The higher the bit rate, the better the quality and the larger the file size. MP3 files can have different bit rates, ranging from 32 kbps to 320 kbps. MP3 320kbps is the highest and best bit rate for MP3 files. Some of the benefits of MP3 320kbps format are:

• It offers the closest sound quality to the original source, such as a CD or a studio recording.
• It preserves the details and nuances of the sound, such as the vocals, instruments, and effects.
• It delivers a rich and balanced sound, with clear highs, mids, and lows.
• It reduces the noise and distortion that can occur in lower bit rates.
• It is compatible with most devices and players that support MP3 format.
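To make the bit-rate-to-size relationship concrete, here is a small, self-contained calculation (plain arithmetic, not tied to any particular app): at 320 kbps an MP3 stream uses 320,000 bits, i.e. 40,000 bytes, per second, which works out to roughly 2.4 MB per minute of audio.

```python
# Rough MP3 file-size estimate from bit rate and duration (ignores metadata overhead).
def mp3_size_mb(bitrate_kbps: int, duration_seconds: float) -> float:
    bytes_per_second = bitrate_kbps * 1000 / 8   # kilobits/s -> bytes/s
    return bytes_per_second * duration_seconds / 1_000_000  # decimal megabytes

# A 3-minute track at 320 kbps is about 7.2 MB; the same track at 128 kbps is about 2.9 MB.
print(round(mp3_size_mb(320, 180), 1))  # 7.2
print(round(mp3_size_mb(128, 180), 1))  # 2.9
```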

      The best sources to download MP3 320kbps songs

      -

      There are many websites and apps that allow you to download MP3 320kbps songs for free or for a fee. However, not all of them are safe, legal, or reliable. Some of them may contain viruses, malware, or spyware that can harm your device or compromise your privacy. Some of them may also violate the copyright laws and infringe on the rights of the artists and producers.

      -

Therefore, you should be careful and selective when choosing a source to download MP3 320kbps songs. Here are some tips to help you find the best sources:

• Look for reputable and trusted websites and apps that have positive reviews and ratings from users and experts.
• Check the terms and conditions and the privacy policy of the website or app before downloading anything.
• Make sure the website or app has a secure connection (HTTPS) and a valid certificate.
• Avoid clicking on pop-ups, ads, or links that look suspicious or irrelevant.
• Use antivirus software and firewall to protect your device from potential threats.

Some of the best sources to download high-quality songs legally are:

• Spotify Premium: Spotify is one of the most popular music streaming services, offering millions of songs in various genres and languages. Spotify Premium is a paid subscription that lets you download up to 10,000 songs per device (on up to five devices) for offline listening at up to 320 kbps; note that downloads are stored in Spotify's own format and play only inside the app, not as standalone MP3 files. You can also enjoy ad-free music and unlimited skips. Spotify Premium costs $9.99 per month for individual users, $14.99 per month for family plans (up to six accounts), and $4.99 per month for students.
• YouTube Music Premium: YouTube Music is another popular music streaming service that offers a huge library of songs, videos, playlists, and live performances. YouTube Music Premium is a paid subscription that lets you download songs to your device at up to 256 kbps (again, inside the app rather than as MP3 files). You can also enjoy ad-free music, background play, and offline access. It costs $9.99 per month for individual users, $14.99 per month for family plans (up to six accounts), and $4.99 per month for students.
• Amazon Music Unlimited: Amazon Music is a music streaming service that offers over 70 million songs in various genres and languages. Amazon Music Unlimited is a paid subscription that lets you download songs for offline listening within the app. You can also enjoy ad-free music, offline listening, and unlimited skips. It costs $9.99 per month for individual users, $14.99 per month for family plans (up to six accounts), $7.99 per month for Prime members, and $3.99 per month for Echo device owners.

      -

      How to Download "I am a Rider" Song in MP3 320kbps?

      -

      If you want to download "I am a Rider" song in MP3 320kbps for free, you can use a YouTube to MP3 converter tool. This is a tool that allows you to convert any YouTube video into an MP3 file with your desired quality. However, you should be aware that this method may not be legal or ethical in some countries or regions, as it may violate the copyright laws and infringe on the rights of the artists and producers. Therefore, you should use this method at your own risk and discretion. Here are the steps to download "I am a Rider" song in MP3 320kbps using a YouTube to MP3 converter tool:

      Step 1: Find a reliable YouTube to MP3 converter tool

      -

There are many YouTube to MP3 converter tools available online, but not all of them are reliable, safe, or fast. Some of them may have low-quality conversions, limited options, annoying ads, or hidden fees. Therefore, you should find a reliable YouTube to MP3 converter tool that has the following features:

• It supports the MP3 320kbps quality option.
• It has a simple and user-friendly interface.
• It has a fast and smooth conversion process.
• It does not require any registration or installation.
• It does not have any malware, spyware, or viruses.

One of the YouTube to MP3 converter tools that meets these criteria is 4K YouTube to MP3. This is a free and easy-to-use tool that allows you to download any YouTube video in MP3 320kbps quality with just a few clicks. You can also use this tool to download videos from other platforms like Vimeo, SoundCloud, Facebook, and Instagram.

      -

      Step 2: Copy and paste the YouTube link of the song

      -

Once you have found a reliable YouTube to MP3 converter tool, the next step is to copy and paste the YouTube link of the song that you want to download. To do this, you need to:

• Go to YouTube and search for "I am a Rider" song by Imran Khan.
• Click on the video that has the official audio of the song.
• Copy the URL of the video from the address bar of your browser.
• Go to the YouTube to MP3 converter tool and paste the URL in the input box.

Alternatively, you can also use the browser extension or the desktop application of the YouTube to MP3 converter tool if they are available.

      -

      Step 3: Choose the MP3 320kbps quality option

      -

After you have pasted the YouTube link of the song, the next step is to choose the MP3 320kbps quality option. To do this, you need to:

• Click on the drop-down menu next to the input box.
• Select the MP3 320kbps option from the list of available formats and qualities.

You can also choose other options like MP3 256kbps, MP3 128kbps, or M4A if you prefer.

      -

      Step 4: Download and enjoy the song

      -

The final step is to download and enjoy the song. To do this, you need to:

• Click on the "Convert" or "Download" button.
• Wait for a few seconds until the conversion is completed.
• Click on the "Download" or "Save" button.
• Choose a destination folder on your device where you want to save the file.
• Open the file and play it with your favorite music player.

You can also share the file with your friends or family via email, Bluetooth, or social media.
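For readers comfortable with Python, a scripted route to the same result uses the open-source yt-dlp library instead of the GUI converter described above. This is only an illustrative sketch: it assumes yt-dlp and FFmpeg are installed, the video URL is a placeholder you must replace, and the same legal caveats mentioned earlier apply. Also note that YouTube's source audio is typically well below 320 kbps, so re-encoding to 320 kbps cannot add quality that is not in the source.

```python
from yt_dlp import YoutubeDL  # pip install yt-dlp; FFmpeg must also be installed

# Placeholder URL: replace with the link of the video you are allowed to download.
video_url = "https://www.youtube.com/watch?v=VIDEO_ID_HERE"

options = {
    "format": "bestaudio/best",            # pick the best available audio-only stream
    "outtmpl": "%(title)s.%(ext)s",        # name the output file after the video title
    "postprocessors": [{
        "key": "FFmpegExtractAudio",       # hand the stream to FFmpeg for re-encoding
        "preferredcodec": "mp3",
        "preferredquality": "320",         # target bit rate in kbps
    }],
}

with YoutubeDL(options) as ydl:
    ydl.download([video_url])
```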

      -

      Conclusion

      -

      "I am a Rider" is a popular and catchy song by Imran Khan that has become a viral sensation on social media platforms. If you want to download this song in high-quality MP3 320kbps format, you can use a YouTube to MP3 converter tool like 4K YouTube to MP3. This is a free and easy-to-use tool that allows you to convert any YouTube video into an MP3 file with your desired quality. However, you should be aware that this method may not be legal or ethical in some countries or regions, as it may violate the copyright laws and infringe on the rights of the artists and producers. Therefore, you should use this method at your own risk and discretion. Alternatively, you can also download "I am a Rider" song in MP3 320kbps format from legal and ethical sources like Spotify Premium, YouTube Music Premium, or Amazon Music Unlimited. These are paid subscriptions that allow you to download millions of songs in high-quality MP3 format on your device. You can also enjoy other benefits like ad-free music, offline listening, and unlimited skips. We hope this article has helped you learn how to download "I am a Rider" song in MP3 320kbps. If you have any questions or feedback, please feel free to leave a comment below. Happy listening!

      FAQs

      -

      Here are some of the frequently asked questions about downloading "I am a Rider" song in MP3 320kbps:

      -
        -
1. Is it legal to download "I am a Rider" song in MP3 320kbps from YouTube?

It depends on the country or region where you live and the source where you download the song from. Some countries or regions may have strict copyright laws that prohibit downloading or distributing copyrighted content without the permission of the owners. Some sources may also have terms and conditions that forbid downloading or converting their content into other formats. Therefore, you should check the laws and the policies of the source before downloading anything from YouTube.

2. Is it safe to download "I am a Rider" song in MP3 320kbps from YouTube?

It depends on the tool that you use to download the song from YouTube. Some tools may be safe, reliable, and fast, while others may be unsafe, unreliable, or slow. Some tools may also contain viruses, malware, or spyware that can harm your device or compromise your privacy. Therefore, you should use a trusted and reputable tool like 4K YouTube to MP3 that has positive reviews and ratings from users and experts.

3. How long does it take to download "I am a Rider" song in MP3 320kbps from YouTube?

It depends on the speed of your internet connection, the size of the file, and the performance of the tool that you use to download the song from YouTube. Generally, it should not take more than a few minutes to download "I am a Rider" song in MP3 320kbps from YouTube. However, if you encounter any issues or errors, you can try again later or contact the support team of the tool.

4. How much space does "I am a Rider" song in MP3 320kbps take on my device?

It depends on the length and the quality of the song that you download from YouTube. Generally, an MP3 320kbps file takes about 2.4 MB per minute of audio. Therefore, if "I am a Rider" song is about 3 minutes long, it should take about 7.2 MB of space on your device.

5. Can I download other songs in MP3 320kbps from YouTube?

Yes, you can download other songs in MP3 320kbps from YouTube using the same method and tool that we have described in this article. However, you should be aware of the legal and ethical implications of doing so, as we have mentioned above.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/What is Robi Red Cube APK 2022 and Why You Need It.md b/spaces/congsaPfin/Manga-OCR/logs/What is Robi Red Cube APK 2022 and Why You Need It.md deleted file mode 100644 index 8475693569be2adff19a069ff4d9e8180e1a3f1b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/What is Robi Red Cube APK 2022 and Why You Need It.md +++ /dev/null @@ -1,228 +0,0 @@ - -

      Robi Red Cube APK 2022 Download: A Complete Guide for Retailers

      -

      If you are a retailer who sells Robi and Airtel SIMs and recharge services, you might have heard of the Robi Red Cube app. This app is a one-stop solution for all your easy load recharge and product requisition needs. It also offers many other features and benefits that can help you grow your business and earn more commissions.

      -

      In this article, we will explain what is Robi Red Cube app, how to download and install it, how to use it, and what are some alternatives to it. By the end of this article, you will have a clear idea of how to use this app to boost your sales and customer satisfaction.

      -




      -

      What is Robi Red Cube App?

      -

      Robi Red Cube app is a mobile application developed by Robi Axiata Limited for its retailers. It allows retailers to perform easy load recharge for Robi and Airtel SIMs, check the offers according to the customer, request products, place complaints, view commissions, and manage their accounts. The app is not available on the Google Play Store, but it can be downloaded from other sources (see below).

      -

      Features of Robi Red Cube App

      -

      The app has many features that make it a useful tool for retailers. Some of the main features are:

      -
        -
• Easy Load Recharge System: You can recharge any Robi or Airtel SIM with any amount or package using this app. You can also view the balance and validity of the SIMs.
• Tong & GStore Recharge Available: You can also recharge Tong and GStore SIMs using this app. These are special SIMs that offer low-cost voice and data services.
• Check the Offer According to the Customer: You can check the best offers for any Robi or Airtel SIM using this app. You can also send the offer details to the customer via SMS.
• SIM Sales: You can sell new Robi or Airtel SIMs using this app. You can also activate the SIMs and register them with biometric verification.
• Campaign: You can participate in various campaigns run by Robi or Airtel and earn rewards and incentives.
• Commission: You can view your commission details and history using this app. You can also request for commission payout and check the status of your request.
• Retailer Information: You can update your personal and business information using this app. You can also change your password and PIN.
• Recharge History: You can view your recharge history and details using this app. You can also filter the history by date, amount, or SIM type.
• Paint Reset: You can reset your paint balance using this app. Paint balance is a credit limit that allows you to recharge without having enough money in your account.
• Device Control: You can control which devices can access your account using this app. You can also block or unblock any device.
      -

      Benefits of Robi Red Cube App


      The app also has many benefits that make it a valuable asset for retailers. Some of the main benefits are:

      • Convenience: You can perform all your recharge and product requisition tasks using this app. You don't need to visit any Robi or Airtel outlet or use any other device. You can also access the app anytime and anywhere.
      • Security: You can secure your account and transactions using this app. You can use a password and a PIN to log in and verify your actions. You can also block any unauthorized device from accessing your account.
      • Efficiency: You can save time and money using this app. You can recharge any SIM with any amount or package in seconds. You can also check the offers and commissions in real-time. You can also avoid errors and frauds by using biometric verification and SMS confirmation.
      • Customer Satisfaction: You can increase your customer satisfaction and loyalty using this app. You can offer them the best deals and services according to their needs. You can also communicate with them via SMS and resolve their issues quickly.
      • Growth: You can grow your business and income using this app. You can sell more SIMs and recharge services to more customers. You can also participate in campaigns and earn rewards and incentives.

      How to Download and Install Robi Red Cube App?


      If you want to download and install Robi Red Cube app, you need to follow these steps:


      Download Links for Robi Red Cube App


      The app is not available on the Google Play Store, but you can download it from other sources. Here are some of the download links for Robi Red Cube app:

      • Robi Official Website: https://www.robi.com.bd/redcube/
      • APKPure: https://apkpure.com/robi-red-cube/com.robi.redcube/
      • APKCombo: https://apkcombo.com/robi-red-cube/com.robi.redcube/
      • APKMonk: https://www.apkmonk.com/app/com.robi.redcube/
      • APKFollow: https://apkfollow.com/app/robi-red-cube/com.robi.redcube/
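
      If you prefer to download the file on a computer first, the short Python sketch below shows one way to save an APK to disk with the requests library. It assumes you already have a direct download link from one of the mirrors above; APK_URL and the output filename here are placeholders of our own, not official links.

        import requests

        APK_URL = "https://example.com/robi-red-cube.apk"  # placeholder: paste a direct link from a mirror above

        response = requests.get(APK_URL, timeout=60)
        response.raise_for_status()  # stop early if the server returns an HTTP error

        with open("robi-red-cube.apk", "wb") as f:
            f.write(response.content)

        print("Saved", len(response.content), "bytes")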

      Installation Steps for Robi Red Cube App


      After downloading the app from any of the above sources, you need to install it on your device. Here are the installation steps for Robi Red Cube app:

      1. Go to the download folder on your device and locate the Robi Red Cube APK file.
      2. Tap on the file and allow the installation from unknown sources if prompted.
      3. Wait for the installation to complete and then open the app.
      4. Login with your retailer ID and password. If you don't have an account, you can register with your mobile number and NID number.
      5. Create a PIN for your account and verify it with an OTP sent to your mobile number.
      6. You are now ready to use the app.
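
      As an optional alternative to the on-device steps above, retailers who downloaded the APK to a computer can sideload it over USB with Android's adb tool. The sketch below is only an illustration: it assumes adb is installed, USB debugging is enabled on the phone, and robi-red-cube.apk is the file you saved earlier.

        import subprocess

        # "adb install -r" installs the APK, replacing an existing copy if one is already present
        result = subprocess.run(
            ["adb", "install", "-r", "robi-red-cube.apk"],
            capture_output=True,
            text=True,
        )
        print(result.stdout or result.stderr)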

      How to Use Robi Red Cube App?


      The app is very easy to use and has a user-friendly interface. Here are some of the main functions that you can perform using the app:


      Easy Load Recharge System


      This is the main function of the app that allows you to recharge any Robi or Airtel SIM with any amount or package. To do this, you need to follow these steps:

      1. Select the Easy Load option from the home screen of the app.
      2. Enter the mobile number of the SIM that you want to recharge.
      3. Select the amount or package that you want to recharge from the list of options.
      4. Enter your PIN and confirm the transaction.
      5. You will receive an SMS confirmation of the recharge along with the balance and validity details.
      6. You can also view the recharge history and details by selecting the Recharge History option from the menu.

      Tong & GStore Recharge Available



      This is another function of the app that allows you to recharge Tong and GStore SIMs. These are special SIMs that offer low-cost voice and data services. To do this, you need to follow these steps:

      1. Select the Tong or GStore option from the home screen of the app.
      2. Enter the mobile number of the SIM that you want to recharge.
      3. Select the amount or package that you want to recharge from the list of options.
      4. Enter your PIN and confirm the transaction.
      5. You will receive an SMS confirmation of the recharge along with the balance and validity details.
      6. You can also view the recharge history and details by selecting the Recharge History option from the menu.

      Check the Offer According to the Customer


      This is a useful function of the app that allows you to check the best offers for any Robi or Airtel SIM. You can also send the offer details to the customer via SMS. To do this, you need to follow these steps:

      1. Select the Offer option from the home screen of the app.
      2. Enter the mobile number of the SIM that you want to check the offer for.
      3. The app will show you the best offers for that SIM according to its usage and preference.
      4. You can select any offer and see its details and benefits.
      5. You can also send the offer details to the customer by selecting the Send SMS option.
      6. The customer will receive an SMS with the offer details and instructions on how to activate it.

      SIM Sales


      This is a function of the app that allows you to sell new Robi or Airtel SIMs. You can also activate the SIMs and register them with biometric verification. To do this, you need to follow these steps:

      1. Select the SIM Sales option from the home screen of the app.
      2. Scan or enter the barcode of the SIM that you want to sell.
      3. The app will show you the SIM details and price.
      4. Enter your PIN and confirm the transaction.
      5. The app will activate the SIM and send an SMS confirmation to it.
      6. You can also register the SIM with biometric verification by selecting the Register option.
      7. The app will ask you to scan or enter the NID number of the customer and capture their fingerprint and photo.
      8. The app will verify the biometric data and register the SIM with it.
      9. The app will send an SMS confirmation to both you and the customer.

      Campaign


      This is a function of the app that allows you to participate in various campaigns run by Robi or Airtel and earn rewards and incentives. To do this, you need to follow these steps:

      1. Select the Campaign option from the home screen of the app.
      2. The app will show you the list of ongoing and upcoming campaigns that you can join.
      3. You can select any campaign and see its details, objectives, criteria, rewards, and terms and conditions.
      4. You can also join any campaign by selecting the Join option.
      5. The app will register you for that campaign and send you an SMS confirmation.
      6. You can also view your progress and performance in any campaign by selecting the View option.

      Commission


      This is a function of the app that allows you to view your commission details and history. You can also request for commission payout and check the status of your request. To do this, you need to follow these steps:

      1. Select the Commission option from the home screen of the app.
      2. The app will show you your current commission balance and history.
      3. You can filter the history by date, amount, or transaction type.
      4. You can also request for commission payout by selecting the Request option.
      5. The app will ask you to enter your bank account details and the amount that you want to withdraw.
      6. Enter your PIN and confirm the request.
      7. The app will process your request and send you an SMS confirmation.
      8. You can also check the status of your request by selecting the Status option.

      Retailer Information


      This is a function of the app that allows you to update your personal and business information. You can also change your password and PIN. To do this, you need to follow these steps:

      1. Select the Retailer Information option from the home screen of the app.
      2. The app will show you your profile details and settings.
      3. You can edit any information by selecting the Edit option.
      4. The app will ask you to enter the new information and verify it with an OTP sent to your mobile number.
      5. Enter your PIN and confirm the changes.
      6. The app will update your information and send you an SMS confirmation.
      7. You can also change your password or PIN by selecting the Change Password or Change PIN option.
      8. The app will ask you to enter your current password or PIN and then enter the new one.
      9. Enter your PIN and confirm the changes.
      10. The app will change your password or PIN and send you an SMS confirmation.

      Recharge History


      This is a function of the app that allows you to view your recharge history and details. You can also filter the history by date, amount, or SIM type. To do this, you need to follow these steps:

      1. Select the Recharge History option from the home screen of the app.
      2. The app will show you your recharge history and details.
      3. You can filter the history by date, amount, or SIM type by selecting the Filter option.
      4. The app will show you the filtered history and details.

      Paint Reset


      This is a function of the app that allows you to reset your paint balance. Paint balance is a credit limit that allows you to recharge without having enough money in your account. To do this, you need to follow these steps:

      1. Select the Paint Reset option from the home screen of the app.
      2. The app will show you your current paint balance and limit.
      3. You can reset your paint balance by selecting the Reset option.
      4. The app will ask you to enter the amount that you want to reset.
      5. Enter your PIN and confirm the reset.
      6. The app will reset your paint balance and send you an SMS confirmation.

      Device Control


      This is a function of the app that allows you to control which devices can access your account. You can also block or unblock any device. To do this, you need to follow these steps:

      1. Select the Device Control option from the home screen of the app.
      2. The app will show you the list of devices that are linked to your account.
      3. You can block any device by selecting the Block option.
      4. The app will ask you to enter your PIN and confirm the block.
      5. The app will block the device and send you an SMS confirmation.
      6. You can also unblock any device by selecting the Unblock option.
      7. The app will ask you to enter your PIN and confirm the unblock.
      8. The app will unblock the device and send you an SMS confirmation.

      Alternatives to Robi Red Cube App


      If you are looking for some alternatives to Robi Red Cube app, you can try these apps:


      My Robi App


      This is an app that allows you to manage your Robi SIM and account. You can check your balance, validity, offers, packages, bills, and transactions. You can also recharge, buy bundles, pay bills, and transfer balance. You can also access various digital services and entertainment content. You can download this app from the Google Play Store or from https://www.robi.com.bd/my-robi/.


      My Airtel App


      This is an app that allows you to manage your Airtel SIM and account. You can check your balance, validity, offers, packages, bills, and transactions. You can also recharge, buy bundles, pay bills, and transfer balance. You can also access various digital services and entertainment content. You can download this app from the Google Play Store or from https://www.bd.airtel.com/my-airtel/.


      Conclusion


      Robi Red Cube app is a mobile application developed by Robi Axiata Limited for its retailers. It allows retailers to perform easy load recharge for Robi and Airtel SIMs, check the offers according to the customer, request products, place complaints, view commissions, and manage their accounts. The app is not available on the Google Play Store, but it can be downloaded from other sources. The app has many features and benefits that make it a useful tool for retailers. The app is also easy to use and has a user-friendly interface. However, if you are looking for some alternatives to Robi Red Cube app, you can try My Robi app or My Airtel app.


      FAQs


      Here are some of the frequently asked questions about Robi Red Cube app:

      1. Q: How can I contact Robi customer care using Robi Red Cube app?
         A: You can contact Robi customer care using Robi Red Cube app by selecting the Complaint option from the home screen of the app. You can then select the category and sub-category of your complaint and write a description of your issue. You can also attach a screenshot or a photo of your issue if needed. You can then submit your complaint and wait for a response from Robi customer care.
      2. Q: How can I update Robi Red Cube app to the latest version?
         A: You can update Robi Red Cube app to the latest version by downloading it from any of the sources mentioned above. You can then install it over the existing version of the app. You don't need to uninstall the previous version of the app.
      3. Q: How can I delete my account from Robi Red Cube app?
         A: You can delete your account from Robi Red Cube app by contacting Robi customer care and requesting them to deactivate your account. You will need to provide your retailer ID and mobile number for verification. Once your account is deactivated, you will not be able to use Robi Red Cube app anymore.
      4. Q: How can I get more customers using Robi Red Cube app?
         A: You can get more customers using Robi Red Cube app by offering them the best deals and services according to their needs. You can also communicate with them via SMS and resolve their issues quickly. You can also participate in campaigns and earn rewards and incentives.
      5. Q: How can I get more commissions using Robi Red Cube app?
         A: You can get more commissions using Robi Red Cube app by selling more SIMs and recharge services to more customers. You can also request for commission payout and check the status of your request. You can also view your commission details and history using the app.

      \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Copal Dpb 1500 Drivers For Mac Best Practices and Tips.md b/spaces/contluForse/HuggingGPT/assets/Copal Dpb 1500 Drivers For Mac Best Practices and Tips.md deleted file mode 100644 index 038332545fd6814c5c3b32eb3ea6d3b16957017a..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Copal Dpb 1500 Drivers For Mac Best Practices and Tips.md +++ /dev/null @@ -1,5 +0,0 @@ - -

      Please note that many of these drivers are currently under development, and we do not necessarily have full specifications on all of them. We will fill in this list as we verify successful operation of these printers. You can help by testing this with your own printer and reporting the results!


      Copal Dpb 1500 Drivers For Mac





      \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/core/evaluation/__init__.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/core/evaluation/__init__.py deleted file mode 100644 index f7cc4b23413a0639e9de00eeb0bf600632d2c6cd..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/core/evaluation/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .class_names import get_classes, get_palette -from .eval_hooks import DistEvalHook, EvalHook -from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou - -__all__ = [ - 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore', - 'eval_metrics', 'get_classes', 'get_palette' -] diff --git a/spaces/crazybber/docker-demo-t5-translation/Dockerfile b/spaces/crazybber/docker-demo-t5-translation/Dockerfile deleted file mode 100644 index c32b6bee1372862a980972d8fb018e34e7630840..0000000000000000000000000000000000000000 --- a/spaces/crazybber/docker-demo-t5-translation/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -# Use the official Python 3.11 image -FROM python:3.11 - -# Set the working directory to /code -WORKDIR /code - -# Copy the current directory contents into the container at /code -COPY ./requirements.txt /code/requirements.txt - -# Install requirements.txt -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user -# Switch to the "user" user -USER user -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . 
$HOME/app - -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/d8aai/image-search/app.py b/spaces/d8aai/image-search/app.py deleted file mode 100644 index 91e31b727a11c7437f0c7cbc2f68a8db23d65c0d..0000000000000000000000000000000000000000 --- a/spaces/d8aai/image-search/app.py +++ /dev/null @@ -1,102 +0,0 @@ -from sentence_transformers import SentenceTransformer, util -from PIL import Image -import pickle -import os -import gradio as gr -import zipfile -import logging - -logger = logging.getLogger(__name__) - -# Load CLIP model -text_model = SentenceTransformer("clip-ViT-B-32-multilingual-v1") - -image_model = SentenceTransformer("clip-ViT-B-32") -image_model.parallel_tokenization = False - -img_folder = ".\\photos\\" -if not os.path.exists(img_folder) or len(os.listdir(img_folder)) == 0: - os.makedirs(img_folder, exist_ok=True) - - photo_filename = "unsplash-25k-photos.zip" - if not os.path.exists(photo_filename): - util.http_get("http://sbert.net/datasets/" + - photo_filename, photo_filename) - - # Extract all images - with zipfile.ZipFile(photo_filename, "r") as zf: - for member in zf.infolist(): - zf.extract(member, img_folder) - - -emb_filename = ".\\unsplash-25k-photos-embeddings.pkl" -if not os.path.exists(emb_filename): - util.http_get( - "http://sbert.net/datasets/unsplash-25k-photos-embeddings.pkl", emb_filename - ) - -with open(emb_filename, "rb") as fIn: - img_names, img_emb = pickle.load(fIn) - - -img_folder = ".\\photos\\" -duplicates = util.paraphrase_mining_embeddings(img_emb) - - -def search_text(query, top_k=1): - """ " Search an image based on the text query. - - Args: - query ([string]): [query you want search for] - top_k (int, optional): [Amount of images o return]. Defaults to 1. - Returns: - [list]: [list of images that are related to the query.] - """ - log_query = query.encode("utf-8").decode("utf-8") - logger.warning(f"{log_query}, {top_k}") - # First, we encode the query. - query_emb = text_model.encode([query]) - - # Then, we use the util.semantic_search function, which computes the cosine-similarity - # between the query embedding and all image embeddings. - # It then returns the top_k highest ranked images, which we output - hits = util.semantic_search(query_emb, img_emb, top_k=top_k)[0] - - image = [] - for hit in hits: - object = Image.open(os.path.join( - ".\\photos\\", img_names[hit["corpus_id"]])) - image.append((object, img_names[hit["corpus_id"]])) - - return image - - -iface_search = gr.Interface( - title="Семантический поиск по картинке - d8a.ai", - description="""Демо-версия семантического поиска изображений, использующая - современные алгоритмы искусственного интеллекта для получения высокоточных - результатов поиска. Пользователи могут искать изображения с помощью запросов - на естественном языке и предварительно просматривать результаты. 
- Это приложение идеально подходит для создателей контента, маркетологов и менеджеров - социальных сетей и обеспечивает более интеллектуальный и интуитивно понятный - способ поиска и управления визуальным контентом.""", - fn=search_text, - allow_flagging="never", - inputs=[ - gr.inputs.Textbox( - lines=4, - label="Поисковый текст", - placeholder="Что вы хотите найти?", - default="Горы Кыргызстана", - ), - gr.inputs.Slider(minimum=0, maximum=9, default=5, - step=1, label="Количество"), - ], - outputs=gr.Gallery( - label="Найденные изображения", show_label=False, elem_id="gallery" - ).style(grid=[5], height="auto"), - examples=[[("Горы Кыргызстана"), 5], [("Люди Кыргызстана"), 2], - [("A dog with a ball"), 5]], -) - -iface_search.launch() diff --git a/spaces/danielcwang-optum/4-GeneratorCalcPipe/README.md b/spaces/danielcwang-optum/4-GeneratorCalcPipe/README.md deleted file mode 100644 index cfbae290768159ca13aa2945ec492c9a555e12dc..0000000000000000000000000000000000000000 --- a/spaces/danielcwang-optum/4-GeneratorCalcPipe/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 🧠Generator Calc Writer📖💾 Gradio -emoji: 3-Gen📖 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/danterivers/music-generation-samples/audiocraft/data/audio_dataset.py b/spaces/danterivers/music-generation-samples/audiocraft/data/audio_dataset.py deleted file mode 100644 index cf21422ea0059cb2d6553f93e608b8f9fa0d3a50..0000000000000000000000000000000000000000 --- a/spaces/danterivers/music-generation-samples/audiocraft/data/audio_dataset.py +++ /dev/null @@ -1,525 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import copy -from concurrent.futures import ThreadPoolExecutor, Future -from dataclasses import dataclass, fields -from contextlib import ExitStack -import gzip -import json -import logging -import os -from pathlib import Path -import random -import sys -import typing as tp - -import torch -import torch.nn.functional as F - -from .audio import audio_read, audio_info -from .audio_utils import convert_audio -from .zip import PathInZip - -try: - import dora -except ImportError: - dora = None # type: ignore - - -@dataclass(order=True) -class BaseInfo: - - @classmethod - def _dict2fields(cls, dictionary: dict): - return { - field.name: dictionary[field.name] - for field in fields(cls) if field.name in dictionary - } - - @classmethod - def from_dict(cls, dictionary: dict): - _dictionary = cls._dict2fields(dictionary) - return cls(**_dictionary) - - def to_dict(self): - return { - field.name: self.__getattribute__(field.name) - for field in fields(self) - } - - -@dataclass(order=True) -class AudioMeta(BaseInfo): - path: str - duration: float - sample_rate: int - amplitude: tp.Optional[float] = None - weight: tp.Optional[float] = None - # info_path is used to load additional information about the audio file that is stored in zip files. 
- info_path: tp.Optional[PathInZip] = None - - @classmethod - def from_dict(cls, dictionary: dict): - base = cls._dict2fields(dictionary) - if 'info_path' in base and base['info_path'] is not None: - base['info_path'] = PathInZip(base['info_path']) - return cls(**base) - - def to_dict(self): - d = super().to_dict() - if d['info_path'] is not None: - d['info_path'] = str(d['info_path']) - return d - - -@dataclass(order=True) -class SegmentInfo(BaseInfo): - meta: AudioMeta - seek_time: float - n_frames: int # actual number of frames without padding - total_frames: int # total number of frames, padding included - sample_rate: int # actual sample rate - - -DEFAULT_EXTS = ['.wav', '.mp3', '.flac', '.ogg', '.m4a'] - -logger = logging.getLogger(__name__) - - -def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta: - """AudioMeta from a path to an audio file. - - Args: - file_path (str): Resolved path of valid audio file. - minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). - Returns: - AudioMeta: Audio file path and its metadata. - """ - info = audio_info(file_path) - amplitude: tp.Optional[float] = None - if not minimal: - wav, sr = audio_read(file_path) - amplitude = wav.abs().max().item() - return AudioMeta(file_path, info.duration, info.sample_rate, amplitude) - - -def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta: - """If Dora is available as a dependency, try to resolve potential relative paths - in list of AudioMeta. This method is expected to be used when loading meta from file. - - Args: - m (AudioMeta): Audio meta to resolve. - fast (bool): If True, uses a really fast check for determining if a file is already absolute or not. - Only valid on Linux/Mac. - Returns: - AudioMeta: Audio meta with resolved path. - """ - def is_abs(m): - if fast: - return str(m)[0] == '/' - else: - os.path.isabs(str(m)) - - if not dora: - return m - - if not is_abs(m.path): - m.path = dora.git_save.to_absolute_path(m.path) - if m.info_path is not None and not is_abs(m.info_path.zip_path): - m.info_path.zip_path = dora.git_save.to_absolute_path(m.path) - return m - - -def find_audio_files(path: tp.Union[Path, str], - exts: tp.List[str] = DEFAULT_EXTS, - resolve: bool = True, - minimal: bool = True, - progress: bool = False, - workers: int = 0) -> tp.List[AudioMeta]: - """Build a list of AudioMeta from a given path, - collecting relevant audio files and fetching meta info. - - Args: - path (str or Path): Path to folder containing audio files. - exts (list of str): List of file extensions to consider for audio files. - minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). - progress (bool): Whether to log progress on audio files collection. - workers (int): number of parallel workers, if 0, use only the current thread. - Returns: - List[AudioMeta]: List of audio file path and its metadata. 
- """ - audio_files = [] - futures: tp.List[Future] = [] - pool: tp.Optional[ThreadPoolExecutor] = None - with ExitStack() as stack: - if workers > 0: - pool = ThreadPoolExecutor(workers) - stack.enter_context(pool) - - if progress: - print("Finding audio files...") - for root, folders, files in os.walk(path, followlinks=True): - for file in files: - full_path = Path(root) / file - if full_path.suffix.lower() in exts: - audio_files.append(full_path) - if pool is not None: - futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal)) - if progress: - print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr) - - if progress: - print("Getting audio metadata...") - meta: tp.List[AudioMeta] = [] - for idx, file_path in enumerate(audio_files): - try: - if pool is None: - m = _get_audio_meta(str(file_path), minimal) - else: - m = futures[idx].result() - if resolve: - m = _resolve_audio_meta(m) - except Exception as err: - print("Error with", str(file_path), err, file=sys.stderr) - continue - meta.append(m) - if progress: - print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr) - meta.sort() - return meta - - -def load_audio_meta(path: tp.Union[str, Path], - resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]: - """Load list of AudioMeta from an optionally compressed json file. - - Args: - path (str or Path): Path to JSON file. - resolve (bool): Whether to resolve the path from AudioMeta (default=True). - fast (bool): activates some tricks to make things faster. - Returns: - List[AudioMeta]: List of audio file path and its total duration. - """ - open_fn = gzip.open if str(path).lower().endswith('.gz') else open - with open_fn(path, 'rb') as fp: # type: ignore - lines = fp.readlines() - meta = [] - for line in lines: - d = json.loads(line) - m = AudioMeta.from_dict(d) - if resolve: - m = _resolve_audio_meta(m, fast=fast) - meta.append(m) - return meta - - -def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]): - """Save the audio metadata to the file pointer as json. - - Args: - path (str or Path): Path to JSON file. - metadata (list of BaseAudioMeta): List of audio meta to save. - """ - Path(path).parent.mkdir(exist_ok=True, parents=True) - open_fn = gzip.open if str(path).lower().endswith('.gz') else open - with open_fn(path, 'wb') as fp: # type: ignore - for m in meta: - json_str = json.dumps(m.to_dict()) + '\n' - json_bytes = json_str.encode('utf-8') - fp.write(json_bytes) - - -class AudioDataset: - """Base audio dataset. - - The dataset takes a list of AudioMeta and create a dataset composed of segments of audio - and potentially additional information, by creating random segments from the list of audio - files referenced in the metadata and applying minimal data pre-processing such as resampling, - mixing of channels, padding, etc. - - If no segment_duration value is provided, the AudioDataset will return the full wav for each - audio file. Otherwise, it will randomly sample audio files and create a segment of the specified - duration, applying padding if required. - - By default, only the torch Tensor corresponding to the waveform is returned. Setting return_info=True - allows to return a tuple containing the torch Tensor and additional metadata on the segment and the - original audio meta. - - Args: - meta (tp.List[AudioMeta]): List of audio files metadata. - segment_duration (float): Optional segment duration of audio to load. - If not specified, the dataset will load the full audio segment from the file. 
- shuffle (bool): Set to `True` to have the data reshuffled at every epoch. - sample_rate (int): Target sample rate of the loaded audio samples. - channels (int): Target number of channels of the loaded audio samples. - sample_on_duration (bool): Set to `True` to sample segments with probability - dependent on audio file duration. This is only used if `segment_duration` is provided. - sample_on_weight (bool): Set to `True` to sample segments using the `weight` entry of - `AudioMeta`. If `sample_on_duration` is also True, the actual weight will be the product - of the file duration and file weight. This is only used if `segment_duration` is provided. - min_segment_ratio (float): Minimum segment ratio to use when the audio file - is shorter than the desired segment. - max_read_retry (int): Maximum number of retries to sample an audio segment from the dataset. - return_info (bool): Whether to return the wav only or return wav along with segment info and metadata. - min_audio_duration (tp.Optional[float], optional): Minimum audio file duration, in seconds, if provided - audio shorter than this will be filtered out. - max_audio_duration (tp.Optional[float], optional): Maximal audio file duration in seconds, if provided - audio longer than this will be filtered out. - """ - def __init__(self, - meta: tp.List[AudioMeta], - segment_duration: tp.Optional[float] = None, - shuffle: bool = True, - num_samples: int = 10_000, - sample_rate: int = 48_000, - channels: int = 2, - pad: bool = True, - sample_on_duration: bool = True, - sample_on_weight: bool = True, - min_segment_ratio: float = 0.5, - max_read_retry: int = 10, - return_info: bool = False, - min_audio_duration: tp.Optional[float] = None, - max_audio_duration: tp.Optional[float] = None - ): - assert len(meta) > 0, 'No audio meta provided to AudioDataset. Please check loading of audio meta.' - assert segment_duration is None or segment_duration > 0 - assert segment_duration is None or min_segment_ratio >= 0 - logging.debug(f'sample_on_duration: {sample_on_duration}') - logging.debug(f'sample_on_weight: {sample_on_weight}') - logging.debug(f'pad: {pad}') - logging.debug(f'min_segment_ratio: {min_segment_ratio}') - - self.segment_duration = segment_duration - self.min_segment_ratio = min_segment_ratio - self.max_audio_duration = max_audio_duration - self.min_audio_duration = min_audio_duration - if self.min_audio_duration is not None and self.max_audio_duration is not None: - assert self.min_audio_duration <= self.max_audio_duration - self.meta: tp.List[AudioMeta] = self._filter_duration(meta) - assert len(self.meta) # Fail fast if all data has been filtered. - self.total_duration = sum(d.duration for d in self.meta) - - if segment_duration is None: - num_samples = len(self.meta) - self.num_samples = num_samples - self.shuffle = shuffle - self.sample_rate = sample_rate - self.channels = channels - self.pad = pad - self.sample_on_weight = sample_on_weight - self.sample_on_duration = sample_on_duration - self.sampling_probabilities = self._get_sampling_probabilities() - self.max_read_retry = max_read_retry - self.return_info = return_info - - def __len__(self): - return self.num_samples - - def _get_sampling_probabilities(self, normalized: bool = True): - """Return the sampling probabilities for each file inside `self.meta`. - """ - scores: tp.List[float] = [] - for file_meta in self.meta: - score = 1. 
- if self.sample_on_weight and file_meta.weight is not None: - score *= file_meta.weight - if self.sample_on_duration: - score *= file_meta.duration - scores.append(score) - probabilities = torch.tensor(scores) - if normalized: - probabilities /= probabilities.sum() - return probabilities - - def sample_file(self, rng: torch.Generator) -> AudioMeta: - """Sample a given file from `self.meta`. Can be overriden in subclasses. - This is only called if `segment_duration` is not None. - - You must use the provided random number generator `rng` for reproducibility. - """ - if not self.sample_on_weight and not self.sample_on_duration: - file_index = int(torch.randint(len(self.sampling_probabilities), (1,), generator=rng).item()) - else: - file_index = int(torch.multinomial(self.sampling_probabilities, 1, generator=rng).item()) - - return self.meta[file_index] - - def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentInfo]]: - if self.segment_duration is None: - file_meta = self.meta[index] - out, sr = audio_read(file_meta.path) - out = convert_audio(out, sr, self.sample_rate, self.channels) - n_frames = out.shape[-1] - segment_info = SegmentInfo(file_meta, seek_time=0., n_frames=n_frames, total_frames=n_frames, - sample_rate=self.sample_rate) - else: - rng = torch.Generator() - if self.shuffle: - # We use index, plus extra randomness - rng.manual_seed(index + self.num_samples * random.randint(0, 2**24)) - else: - # We only use index - rng.manual_seed(index) - - for retry in range(self.max_read_retry): - file_meta = self.sample_file(rng) - # We add some variance in the file position even if audio file is smaller than segment - # without ending up with empty segments - max_seek = max(0, file_meta.duration - self.segment_duration * self.min_segment_ratio) - seek_time = torch.rand(1, generator=rng).item() * max_seek - try: - out, sr = audio_read(file_meta.path, seek_time, self.segment_duration, pad=False) - out = convert_audio(out, sr, self.sample_rate, self.channels) - n_frames = out.shape[-1] - target_frames = int(self.segment_duration * self.sample_rate) - if self.pad: - out = F.pad(out, (0, target_frames - n_frames)) - segment_info = SegmentInfo(file_meta, seek_time, n_frames=n_frames, total_frames=target_frames, - sample_rate=self.sample_rate) - except Exception as exc: - logger.warning("Error opening file %s: %r", file_meta.path, exc) - if retry == self.max_read_retry - 1: - raise - else: - break - - if self.return_info: - # Returns the wav and additional information on the wave segment - return out, segment_info - else: - return out - - def collater(self, samples): - """The collater function has to be provided to the dataloader - if AudioDataset has return_info=True in order to properly collate - the samples of a batch. - """ - if self.segment_duration is None and len(samples) > 1: - assert self.pad, "Must allow padding when batching examples of different durations." - - # In this case the audio reaching the collater is of variable length as segment_duration=None. 
- to_pad = self.segment_duration is None and self.pad - if to_pad: - max_len = max([wav.shape[-1] for wav, _ in samples]) - - def _pad_wav(wav): - return F.pad(wav, (0, max_len - wav.shape[-1])) - - if self.return_info: - if len(samples) > 0: - assert len(samples[0]) == 2 - assert isinstance(samples[0][0], torch.Tensor) - assert isinstance(samples[0][1], SegmentInfo) - - wavs = [wav for wav, _ in samples] - segment_infos = [copy.deepcopy(info) for _, info in samples] - - if to_pad: - # Each wav could be of a different duration as they are not segmented. - for i in range(len(samples)): - # Determines the total legth of the signal with padding, so we update here as we pad. - segment_infos[i].total_frames = max_len - wavs[i] = _pad_wav(wavs[i]) - - wav = torch.stack(wavs) - return wav, segment_infos - else: - assert isinstance(samples[0], torch.Tensor) - if to_pad: - samples = [_pad_wav(s) for s in samples] - return torch.stack(samples) - - def _filter_duration(self, meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]: - """Filters out audio files with short durations. - Removes from meta files that have durations that will not allow to samples examples from them. - """ - orig_len = len(meta) - - # Filter data that is too short. - if self.min_audio_duration is not None: - meta = [m for m in meta if m.duration >= self.min_audio_duration] - - # Filter data that is too long. - if self.max_audio_duration is not None: - meta = [m for m in meta if m.duration <= self.max_audio_duration] - - filtered_len = len(meta) - removed_percentage = 100*(1-float(filtered_len)/orig_len) - msg = 'Removed %.2f percent of the data because it was too short or too long.' % removed_percentage - if removed_percentage < 10: - logging.debug(msg) - else: - logging.warning(msg) - return meta - - @classmethod - def from_meta(cls, root: tp.Union[str, Path], **kwargs): - """Instantiate AudioDataset from a path to a directory containing a manifest as a jsonl file. - - Args: - root (str or Path): Path to root folder containing audio files. - kwargs: Additional keyword arguments for the AudioDataset. - """ - root = Path(root) - if root.is_dir(): - if (root / 'data.jsonl').exists(): - root = root / 'data.jsonl' - elif (root / 'data.jsonl.gz').exists(): - root = root / 'data.jsonl.gz' - else: - raise ValueError("Don't know where to read metadata from in the dir. " - "Expecting either a data.jsonl or data.jsonl.gz file but none found.") - meta = load_audio_meta(root) - return cls(meta, **kwargs) - - @classmethod - def from_path(cls, root: tp.Union[str, Path], minimal_meta: bool = True, - exts: tp.List[str] = DEFAULT_EXTS, **kwargs): - """Instantiate AudioDataset from a path containing (possibly nested) audio files. - - Args: - root (str or Path): Path to root folder containing audio files. - minimal_meta (bool): Whether to only load minimal metadata or not. - exts (list of str): Extensions for audio files. - kwargs: Additional keyword arguments for the AudioDataset. 
- """ - root = Path(root) - if root.is_file(): - meta = load_audio_meta(root, resolve=True) - else: - meta = find_audio_files(root, exts, minimal=minimal_meta, resolve=True) - return cls(meta, **kwargs) - - -def main(): - logging.basicConfig(stream=sys.stderr, level=logging.INFO) - parser = argparse.ArgumentParser( - prog='audio_dataset', - description='Generate .jsonl files by scanning a folder.') - parser.add_argument('root', help='Root folder with all the audio files') - parser.add_argument('output_meta_file', - help='Output file to store the metadata, ') - parser.add_argument('--complete', - action='store_false', dest='minimal', default=True, - help='Retrieve all metadata, even the one that are expansive ' - 'to compute (e.g. normalization).') - parser.add_argument('--resolve', - action='store_true', default=False, - help='Resolve the paths to be absolute and with no symlinks.') - parser.add_argument('--workers', - default=10, type=int, - help='Number of workers.') - args = parser.parse_args() - meta = find_audio_files(args.root, DEFAULT_EXTS, progress=True, - resolve=args.resolve, minimal=args.minimal, workers=args.workers) - save_audio_meta(args.output_meta_file, meta) - - -if __name__ == '__main__': - main() diff --git a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/open_clip/pann_model.py b/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/open_clip/pann_model.py deleted file mode 100644 index 874a03fc6eabcfdf3a63c59ca1e05d4f991453c5..0000000000000000000000000000000000000000 --- a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/open_clip/pann_model.py +++ /dev/null @@ -1,703 +0,0 @@ -# PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition -# Reference from https://github.com/qiuqiangkong/audioset_tagging_cnn -# Some layers are re-designed for CLAP -import os - -os.environ["NUMBA_CACHE_DIR"] = "/tmp/" - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchlibrosa.stft import Spectrogram, LogmelFilterBank -from torchlibrosa.augmentation import SpecAugmentation - -from .utils import do_mixup, interpolate, pad_framewise_output -from .feature_fusion import iAFF, AFF, DAF - - -def init_layer(layer): - """Initialize a Linear or Convolutional layer.""" - nn.init.xavier_uniform_(layer.weight) - - if hasattr(layer, "bias"): - if layer.bias is not None: - layer.bias.data.fill_(0.0) - -def init_bn(bn): - """Initialize a Batchnorm layer.""" - bn.bias.data.fill_(0.0) - bn.weight.data.fill_(1.0) - - -class ConvBlock(nn.Module): - def __init__(self, in_channels, out_channels): - - super(ConvBlock, self).__init__() - - self.conv1 = nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ) - - self.conv2 = nn.Conv2d( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ) - - self.bn1 = nn.BatchNorm2d(out_channels) - self.bn2 = nn.BatchNorm2d(out_channels) - - self.init_weight() - - def init_weight(self): - init_layer(self.conv1) - init_layer(self.conv2) - init_bn(self.bn1) - init_bn(self.bn2) - - def forward(self, input, pool_size=(2, 2), pool_type="avg"): - - x = input - x = F.relu_(self.bn1(self.conv1(x))) - x = F.relu_(self.bn2(self.conv2(x))) - if pool_type == "max": - x = F.max_pool2d(x, kernel_size=pool_size) - elif pool_type == "avg": - x = F.avg_pool2d(x, kernel_size=pool_size) - elif pool_type == "avg+max": - x1 = F.avg_pool2d(x, 
kernel_size=pool_size) - x2 = F.max_pool2d(x, kernel_size=pool_size) - x = x1 + x2 - else: - raise Exception("Incorrect argument!") - - return x - - -class ConvBlock5x5(nn.Module): - def __init__(self, in_channels, out_channels): - - super(ConvBlock5x5, self).__init__() - - self.conv1 = nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(5, 5), - stride=(1, 1), - padding=(2, 2), - bias=False, - ) - - self.bn1 = nn.BatchNorm2d(out_channels) - - self.init_weight() - - def init_weight(self): - init_layer(self.conv1) - init_bn(self.bn1) - - def forward(self, input, pool_size=(2, 2), pool_type="avg"): - - x = input - x = F.relu_(self.bn1(self.conv1(x))) - if pool_type == "max": - x = F.max_pool2d(x, kernel_size=pool_size) - elif pool_type == "avg": - x = F.avg_pool2d(x, kernel_size=pool_size) - elif pool_type == "avg+max": - x1 = F.avg_pool2d(x, kernel_size=pool_size) - x2 = F.max_pool2d(x, kernel_size=pool_size) - x = x1 + x2 - else: - raise Exception("Incorrect argument!") - - return x - - -class AttBlock(nn.Module): - def __init__(self, n_in, n_out, activation="linear", temperature=1.0): - super(AttBlock, self).__init__() - - self.activation = activation - self.temperature = temperature - self.att = nn.Conv1d( - in_channels=n_in, - out_channels=n_out, - kernel_size=1, - stride=1, - padding=0, - bias=True, - ) - self.cla = nn.Conv1d( - in_channels=n_in, - out_channels=n_out, - kernel_size=1, - stride=1, - padding=0, - bias=True, - ) - - self.bn_att = nn.BatchNorm1d(n_out) - self.init_weights() - - def init_weights(self): - init_layer(self.att) - init_layer(self.cla) - init_bn(self.bn_att) - - def forward(self, x): - # x: (n_samples, n_in, n_time) - norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1) - cla = self.nonlinear_transform(self.cla(x)) - x = torch.sum(norm_att * cla, dim=2) - return x, norm_att, cla - - def nonlinear_transform(self, x): - if self.activation == "linear": - return x - elif self.activation == "sigmoid": - return torch.sigmoid(x) - - -class Cnn14(nn.Module): - def __init__( - self, - sample_rate, - window_size, - hop_size, - mel_bins, - fmin, - fmax, - classes_num, - enable_fusion=False, - fusion_type="None", - ): - - super(Cnn14, self).__init__() - - window = "hann" - center = True - pad_mode = "reflect" - ref = 1.0 - amin = 1e-10 - top_db = None - - self.enable_fusion = enable_fusion - self.fusion_type = fusion_type - - # Spectrogram extractor - self.spectrogram_extractor = Spectrogram( - n_fft=window_size, - hop_length=hop_size, - win_length=window_size, - window=window, - center=center, - pad_mode=pad_mode, - freeze_parameters=True, - ) - - # Logmel feature extractor - self.logmel_extractor = LogmelFilterBank( - sr=sample_rate, - n_fft=window_size, - n_mels=mel_bins, - fmin=fmin, - fmax=fmax, - ref=ref, - amin=amin, - top_db=top_db, - freeze_parameters=True, - ) - - # Spec augmenter - self.spec_augmenter = SpecAugmentation( - time_drop_width=64, - time_stripes_num=2, - freq_drop_width=8, - freq_stripes_num=2, - ) - - self.bn0 = nn.BatchNorm2d(64) - - if (self.enable_fusion) and (self.fusion_type == "channel_map"): - self.conv_block1 = ConvBlock(in_channels=4, out_channels=64) - else: - self.conv_block1 = ConvBlock(in_channels=1, out_channels=64) - self.conv_block2 = ConvBlock(in_channels=64, out_channels=128) - self.conv_block3 = ConvBlock(in_channels=128, out_channels=256) - self.conv_block4 = ConvBlock(in_channels=256, out_channels=512) - self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024) - 
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048) - - self.fc1 = nn.Linear(2048, 2048, bias=True) - self.fc_audioset = nn.Linear(2048, classes_num, bias=True) - - if (self.enable_fusion) and ( - self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"] - ): - self.mel_conv1d = nn.Sequential( - nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2), - nn.BatchNorm1d(64), # No Relu - ) - if self.fusion_type == "daf_1d": - self.fusion_model = DAF() - elif self.fusion_type == "aff_1d": - self.fusion_model = AFF(channels=64, type="1D") - elif self.fusion_type == "iaff_1d": - self.fusion_model = iAFF(channels=64, type="1D") - - if (self.enable_fusion) and ( - self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"] - ): - self.mel_conv2d = nn.Sequential( - nn.Conv2d(1, 64, kernel_size=(5, 5), stride=(6, 2), padding=(2, 2)), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True), - ) - - if self.fusion_type == "daf_2d": - self.fusion_model = DAF() - elif self.fusion_type == "aff_2d": - self.fusion_model = AFF(channels=64, type="2D") - elif self.fusion_type == "iaff_2d": - self.fusion_model = iAFF(channels=64, type="2D") - self.init_weight() - - def init_weight(self): - init_bn(self.bn0) - init_layer(self.fc1) - init_layer(self.fc_audioset) - - def forward(self, input, mixup_lambda=None, device=None): - """ - Input: (batch_size, data_length)""" - - if self.enable_fusion and input["longer"].sum() == 0: - # if no audio is longer than 10s, then randomly select one audio to be longer - input["longer"][torch.randint(0, input["longer"].shape[0], (1,))] = True - - if not self.enable_fusion: - x = self.spectrogram_extractor( - input["waveform"].to(device=device, non_blocking=True) - ) # (batch_size, 1, time_steps, freq_bins) - x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) - - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - else: - longer_list = input["longer"].to(device=device, non_blocking=True) - x = input["mel_fusion"].to(device=device, non_blocking=True) - longer_list_idx = torch.where(longer_list)[0] - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - if self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]: - new_x = x[:, 0:1, :, :].clone().contiguous() - # local processing - if len(longer_list_idx) > 0: - fusion_x_local = x[longer_list_idx, 1:, :, :].clone().contiguous() - FB, FC, FT, FF = fusion_x_local.size() - fusion_x_local = fusion_x_local.view(FB * FC, FT, FF) - fusion_x_local = torch.permute( - fusion_x_local, (0, 2, 1) - ).contiguous() - fusion_x_local = self.mel_conv1d(fusion_x_local) - fusion_x_local = fusion_x_local.view( - FB, FC, FF, fusion_x_local.size(-1) - ) - fusion_x_local = ( - torch.permute(fusion_x_local, (0, 2, 1, 3)) - .contiguous() - .flatten(2) - ) - if fusion_x_local.size(-1) < FT: - fusion_x_local = torch.cat( - [ - fusion_x_local, - torch.zeros( - (FB, FF, FT - fusion_x_local.size(-1)), - device=device, - ), - ], - dim=-1, - ) - else: - fusion_x_local = fusion_x_local[:, :, :FT] - # 1D fusion - new_x = new_x.squeeze(1).permute((0, 2, 1)).contiguous() - new_x[longer_list_idx] = self.fusion_model( - new_x[longer_list_idx], fusion_x_local - ) - x = new_x.permute((0, 2, 1)).contiguous()[:, None, :, :] - else: - x = new_x - elif self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d", "channel_map"]: - x = x # no change - - if self.training: - x = self.spec_augmenter(x) - # Mixup on spectrogram - if self.training and mixup_lambda is not None: - x = do_mixup(x, mixup_lambda) - if (self.enable_fusion) and ( - self.fusion_type in 
["daf_2d", "aff_2d", "iaff_2d"] - ): - global_x = x[:, 0:1, :, :] - - # global processing - B, C, H, W = global_x.shape - global_x = self.conv_block1(global_x, pool_size=(2, 2), pool_type="avg") - if len(longer_list_idx) > 0: - local_x = x[longer_list_idx, 1:, :, :].contiguous() - TH = global_x.size(-2) - # local processing - B, C, H, W = local_x.shape - local_x = local_x.view(B * C, 1, H, W) - local_x = self.mel_conv2d(local_x) - local_x = local_x.view( - B, C, local_x.size(1), local_x.size(2), local_x.size(3) - ) - local_x = local_x.permute((0, 2, 1, 3, 4)).contiguous().flatten(2, 3) - TB, TC, _, TW = local_x.size() - if local_x.size(-2) < TH: - local_x = torch.cat( - [ - local_x, - torch.zeros( - (TB, TC, TH - local_x.size(-2), TW), - device=global_x.device, - ), - ], - dim=-2, - ) - else: - local_x = local_x[:, :, :TH, :] - - global_x[longer_list_idx] = self.fusion_model( - global_x[longer_list_idx], local_x - ) - x = global_x - else: - x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg") - - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block6(x, pool_size=(1, 1), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = torch.mean(x, dim=3) - - latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x = latent_x1 + latent_x2 - latent_x = latent_x.transpose(1, 2) - latent_x = F.relu_(self.fc1(latent_x)) - latent_output = interpolate(latent_x, 32) - - (x1, _) = torch.max(x, dim=2) - x2 = torch.mean(x, dim=2) - x = x1 + x2 - x = F.dropout(x, p=0.5, training=self.training) - x = F.relu_(self.fc1(x)) - embedding = F.dropout(x, p=0.5, training=self.training) - clipwise_output = torch.sigmoid(self.fc_audioset(x)) - - output_dict = { - "clipwise_output": clipwise_output, - "embedding": embedding, - "fine_grained_embedding": latent_output, - } - return output_dict - - -class Cnn6(nn.Module): - def __init__( - self, - sample_rate, - window_size, - hop_size, - mel_bins, - fmin, - fmax, - classes_num, - enable_fusion=False, - fusion_type="None", - ): - - super(Cnn6, self).__init__() - - window = "hann" - center = True - pad_mode = "reflect" - ref = 1.0 - amin = 1e-10 - top_db = None - - self.enable_fusion = enable_fusion - self.fusion_type = fusion_type - - # Spectrogram extractor - self.spectrogram_extractor = Spectrogram( - n_fft=window_size, - hop_length=hop_size, - win_length=window_size, - window=window, - center=center, - pad_mode=pad_mode, - freeze_parameters=True, - ) - - # Logmel feature extractor - self.logmel_extractor = LogmelFilterBank( - sr=sample_rate, - n_fft=window_size, - n_mels=mel_bins, - fmin=fmin, - fmax=fmax, - ref=ref, - amin=amin, - top_db=top_db, - freeze_parameters=True, - ) - - # Spec augmenter - self.spec_augmenter = SpecAugmentation( - time_drop_width=64, - time_stripes_num=2, - freq_drop_width=8, - freq_stripes_num=2, - ) - - self.bn0 = nn.BatchNorm2d(64) - - self.conv_block1 = ConvBlock5x5(in_channels=1, out_channels=64) - self.conv_block2 = ConvBlock5x5(in_channels=64, out_channels=128) - self.conv_block3 = 
ConvBlock5x5(in_channels=128, out_channels=256) - self.conv_block4 = ConvBlock5x5(in_channels=256, out_channels=512) - - self.fc1 = nn.Linear(512, 512, bias=True) - self.fc_audioset = nn.Linear(512, classes_num, bias=True) - - self.init_weight() - - def init_weight(self): - init_bn(self.bn0) - init_layer(self.fc1) - init_layer(self.fc_audioset) - - def forward(self, input, mixup_lambda=None, device=None): - """ - Input: (batch_size, data_length)""" - - x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins) - x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) - - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - - if self.training: - x = self.spec_augmenter(x) - - # Mixup on spectrogram - if self.training and mixup_lambda is not None: - x = do_mixup(x, mixup_lambda) - - x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = torch.mean(x, dim=3) - - latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x = latent_x1 + latent_x2 - latent_x = latent_x.transpose(1, 2) - latent_x = F.relu_(self.fc1(latent_x)) - latent_output = interpolate(latent_x, 16) - - (x1, _) = torch.max(x, dim=2) - x2 = torch.mean(x, dim=2) - x = x1 + x2 - x = F.dropout(x, p=0.5, training=self.training) - x = F.relu_(self.fc1(x)) - embedding = F.dropout(x, p=0.5, training=self.training) - clipwise_output = torch.sigmoid(self.fc_audioset(x)) - - output_dict = { - "clipwise_output": clipwise_output, - "embedding": embedding, - "fine_grained_embedding": latent_output, - } - - return output_dict - - -class Cnn10(nn.Module): - def __init__( - self, - sample_rate, - window_size, - hop_size, - mel_bins, - fmin, - fmax, - classes_num, - enable_fusion=False, - fusion_type="None", - ): - - super(Cnn10, self).__init__() - - window = "hann" - center = True - pad_mode = "reflect" - ref = 1.0 - amin = 1e-10 - top_db = None - - self.enable_fusion = enable_fusion - self.fusion_type = fusion_type - - # Spectrogram extractor - self.spectrogram_extractor = Spectrogram( - n_fft=window_size, - hop_length=hop_size, - win_length=window_size, - window=window, - center=center, - pad_mode=pad_mode, - freeze_parameters=True, - ) - - # Logmel feature extractor - self.logmel_extractor = LogmelFilterBank( - sr=sample_rate, - n_fft=window_size, - n_mels=mel_bins, - fmin=fmin, - fmax=fmax, - ref=ref, - amin=amin, - top_db=top_db, - freeze_parameters=True, - ) - - # Spec augmenter - self.spec_augmenter = SpecAugmentation( - time_drop_width=64, - time_stripes_num=2, - freq_drop_width=8, - freq_stripes_num=2, - ) - - self.bn0 = nn.BatchNorm2d(64) - - self.conv_block1 = ConvBlock(in_channels=1, out_channels=64) - self.conv_block2 = ConvBlock(in_channels=64, out_channels=128) - self.conv_block3 = ConvBlock(in_channels=128, out_channels=256) - self.conv_block4 = ConvBlock(in_channels=256, out_channels=512) - self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024) - - self.fc1 = nn.Linear(1024, 1024, bias=True) - self.fc_audioset = nn.Linear(1024, classes_num, bias=True) - - self.init_weight() - - def init_weight(self): - 
init_bn(self.bn0) - init_layer(self.fc1) - init_layer(self.fc_audioset) - - def forward(self, input, mixup_lambda=None, device=None): - """ - Input: (batch_size, data_length)""" - - x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins) - x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) - - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - - if self.training: - x = self.spec_augmenter(x) - - # Mixup on spectrogram - if self.training and mixup_lambda is not None: - x = do_mixup(x, mixup_lambda) - - x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg") - x = F.dropout(x, p=0.2, training=self.training) - x = torch.mean(x, dim=3) - - latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x = latent_x1 + latent_x2 - latent_x = latent_x.transpose(1, 2) - latent_x = F.relu_(self.fc1(latent_x)) - latent_output = interpolate(latent_x, 32) - - (x1, _) = torch.max(x, dim=2) - x2 = torch.mean(x, dim=2) - x = x1 + x2 - x = F.dropout(x, p=0.5, training=self.training) - x = F.relu_(self.fc1(x)) - embedding = F.dropout(x, p=0.5, training=self.training) - clipwise_output = torch.sigmoid(self.fc_audioset(x)) - - output_dict = { - "clipwise_output": clipwise_output, - "embedding": embedding, - "fine_grained_embedding": latent_output, - } - - return output_dict - - -def create_pann_model(audio_cfg, enable_fusion=False, fusion_type="None"): - try: - ModelProto = eval(audio_cfg.model_name) - model = ModelProto( - sample_rate=audio_cfg.sample_rate, - window_size=audio_cfg.window_size, - hop_size=audio_cfg.hop_size, - mel_bins=audio_cfg.mel_bins, - fmin=audio_cfg.fmin, - fmax=audio_cfg.fmax, - classes_num=audio_cfg.class_num, - enable_fusion=enable_fusion, - fusion_type=fusion_type, - ) - return model - except: - raise RuntimeError( - f"Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough." 
- ) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-f0e43e7d.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-f0e43e7d.css deleted file mode 100644 index fb320f5e9afc1570c36e34f44865052ff83acf86..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-f0e43e7d.css +++ /dev/null @@ -1 +0,0 @@ -.base-image.svelte-m3v3vb.svelte-m3v3vb{display:block;width:100%;height:auto}.container.svelte-m3v3vb.svelte-m3v3vb{display:flex;position:relative;flex-direction:column;justify-content:center;align-items:center;width:var(--size-full);height:var(--size-full)}.image-container.svelte-m3v3vb.svelte-m3v3vb{position:relative;top:0;left:0;flex-grow:1;width:100%;overflow:hidden}.fit-height.svelte-m3v3vb.svelte-m3v3vb{position:absolute;top:0;left:0;width:100%;height:100%;object-fit:contain}.mask.svelte-m3v3vb.svelte-m3v3vb{opacity:.85;transition:all .2s ease-in-out}.image-container.svelte-m3v3vb:hover .mask.svelte-m3v3vb{opacity:.3}.mask.active.svelte-m3v3vb.svelte-m3v3vb{opacity:1}.mask.inactive.svelte-m3v3vb.svelte-m3v3vb{opacity:0}.legend.svelte-m3v3vb.svelte-m3v3vb{display:flex;flex-direction:row;flex-wrap:wrap;align-content:center;justify-content:center;align-items:center;gap:var(--spacing-sm);padding:var(--spacing-sm)}.legend-item.svelte-m3v3vb.svelte-m3v3vb{display:flex;flex-direction:row;align-items:center;cursor:pointer;border-radius:var(--radius-sm);padding:var(--spacing-sm)} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/File-9c296a9c.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/File-9c296a9c.js deleted file mode 100644 index 7e50aed748cf5eb97b98f79dc9d235e6d92c99fb..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/File-9c296a9c.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as h,e as c,s as f,f as o,g as t,h as d,j as l,n as r,k as u}from"./index-39fce9e2.js";function g(i){let e,s,n;return{c(){e=o("svg"),s=o("path"),n=o("polyline"),t(s,"d","M13 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V9z"),t(n,"points","13 2 13 9 20 9"),t(e,"xmlns","http://www.w3.org/2000/svg"),t(e,"width","100%"),t(e,"height","100%"),t(e,"viewBox","0 0 24 24"),t(e,"fill","none"),t(e,"stroke","currentColor"),t(e,"stroke-width","1.5"),t(e,"stroke-linecap","round"),t(e,"stroke-linejoin","round"),t(e,"class","feather feather-file")},m(a,p){d(a,e,p),l(e,s),l(e,n)},p:r,i:r,o:r,d(a){a&&u(e)}}}class v extends h{constructor(e){super(),c(this,e,null,g,f,{})}}export{v as F}; -//# sourceMappingURL=File-9c296a9c.js.map diff --git a/spaces/derful/Chatgpt-academic/README.md b/spaces/derful/Chatgpt-academic/README.md deleted file mode 100644 index 53a4d31aa5e6907d40e13af9c6b2ba79feb6cb2f..0000000000000000000000000000000000000000 --- a/spaces/derful/Chatgpt-academic/README.md +++ /dev/null @@ -1,257 +0,0 @@ ---- -title: Chatgpt-academic -emoji: 😻 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.23.0 -app_file: main.py -pinned: false ---- - - -# ChatGPT 学术优化 - -**如果喜欢这个项目,请给它一个Star;如果你发明了更好用的学术快捷键,欢迎发issue或者pull requests** - -If you like this project, please give it a Star. 
If you've come up with more useful academic shortcuts, feel free to open an issue or pull request. - -``` -代码中参考了很多其他优秀项目中的设计,主要包括: - -# 借鉴项目1:借鉴了ChuanhuChatGPT中读取OpenAI json的方法、记录历史问询记录的方法以及gradio queue的使用技巧 -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# 借鉴项目2:借鉴了mdtex2html中公式处理的方法 -https://github.com/polarwinkel/mdtex2html - -项目使用OpenAI的gpt-3.5-turbo模型,期待gpt-4早点放宽门槛😂 -``` - -> **Note** -> 请注意只有“红颜色”标识的函数插件(按钮)才支持读取文件。目前暂不能完善地支持pdf格式文献的翻译解读,尚不支持word格式文件的读取。 - - -
      - -功能 | 描述 ---- | --- -一键润色 | 支持一键润色、一键查找论文语法错误 -一键中英互译 | 一键中英互译 -一键代码解释 | 可以正确显示代码、解释代码 -自定义快捷键 | 支持自定义快捷键 -配置代理服务器 | 支持配置代理服务器 -模块化设计 | 支持自定义高阶的实验性功能 -自我程序剖析 | [实验性功能] 一键读懂本项目的源代码 -程序剖析 | [实验性功能] 一键可以剖析其他Python/C++项目 -读论文 | [实验性功能] 一键解读latex论文全文并生成摘要 -批量注释生成 | [实验性功能] 一键批量生成函数注释 -chat分析报告生成 | [实验性功能] 运行后自动生成总结汇报 -公式显示 | 可以同时显示公式的tex形式和渲染形式 -图片显示 | 可以在markdown中显示图片 -支持GPT输出的markdown表格 | 可以输出支持GPT的markdown表格 - -
      - -- 新界面 -
      - -
      - - - -- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放粘贴板 -
      - -
      - -- 润色/纠错 -
      - -
      - - -- 支持GPT输出的markdown表格 -
      - -
      - -- 如果输出包含公式,会同时以tex形式和渲染形式显示,方便复制和阅读 -
      - -
      - - -- 懒得看项目代码?整个工程直接给chatgpt炫嘴里 -
      - -
      - -## 直接运行 (Windows, Linux or MacOS) - -下载项目 - -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -我们建议将`config.py`复制为`config_private.py`并将后者用作个性化配置文件以避免`config.py`中的变更影响你的使用或不小心将包含你的OpenAI API KEY的`config.py`提交至本项目。 - -```sh -cp config.py config_private.py -``` - -在`config_private.py`中,配置 海外Proxy 和 OpenAI API KEY -``` -1. 如果你在国内,需要设置海外代理才能够使用 OpenAI API,你可以通过 config.py 文件来进行设置。 -2. 配置 OpenAI API KEY。你需要在 OpenAI 官网上注册并获取 API KEY。一旦你拿到了 API KEY,在 config.py 文件里配置好即可。 -``` -安装依赖 - -```sh -python -m pip install -r requirements.txt -``` - -或者,如果你希望使用`conda` - -```sh -conda create -n gptac 'gradio>=3.23' requests -conda activate gptac -python3 -m pip install mdtex2html -``` - -运行 - -```sh -python main.py -``` - -测试实验性功能 -``` -- 测试C++项目头文件分析 - input区域 输入 `./crazy_functions/test_project/cpp/libJPG` , 然后点击 "[实验] 解析整个C++项目(input输入项目根路径)" -- 测试给Latex项目写摘要 - input区域 输入 `./crazy_functions/test_project/latex/attention` , 然后点击 "[实验] 读tex论文写摘要(input输入项目根路径)" -- 测试Python项目分析 - input区域 输入 `./crazy_functions/test_project/python/dqn` , 然后点击 "[实验] 解析整个py项目(input输入项目根路径)" -- 测试自我代码解读 - 点击 "[实验] 请解析并解构此项目本身" -- 测试实验功能模板函数(要求gpt回答历史上的今天发生了什么),您可以根据此函数为模板,实现更复杂的功能 - 点击 "[实验] 实验功能函数模板" -``` -## 使用docker (Linux) - -``` sh -# 下载项目 -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -# 配置 海外Proxy 和 OpenAI API KEY -config.py -# 安装 -docker build -t gpt-academic . -# 运行 -docker run --rm -it --net=host gpt-academic - -# 测试实验性功能 -## 测试自我代码解读 -点击 "[实验] 请解析并解构此项目本身" -## 测试实验功能模板函数(要求gpt回答历史上的今天发生了什么),您可以根据此函数为模板,实现更复杂的功能 -点击 "[实验] 实验功能函数模板" -##(请注意在docker中运行时,需要额外注意程序的文件访问权限问题) -## 测试C++项目头文件分析 -input区域 输入 ./crazy_functions/test_project/cpp/libJPG , 然后点击 "[实验] 解析整个C++项目(input输入项目根路径)" -## 测试给Latex项目写摘要 -input区域 输入 ./crazy_functions/test_project/latex/attention , 然后点击 "[实验] 读tex论文写摘要(input输入项目根路径)" -## 测试Python项目分析 -input区域 输入 ./crazy_functions/test_project/python/dqn , 然后点击 "[实验] 解析整个py项目(input输入项目根路径)" - -``` - - -## 自定义新的便捷按钮(学术快捷键自定义) -打开functional.py,添加条目如下,然后重启程序即可。(如果按钮已经添加成功并可见,那么前缀、后缀都支持热修改,无需重启程序即可生效。) -例如 -``` -"超级英译中": { - - # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等 - "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n", - - # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来。 - "Suffix": "", - -}, -``` -
      - -
      - - -如果你发明了更好用的学术快捷键,欢迎发issue或者pull requests! - -## 配置代理 - -在```config.py```中修改端口与代理软件对应 - -
      - - -
      - -配置完成后,你可以用以下命令测试代理是否工作,如果一切正常,下面的代码将输出你的代理服务器所在地: -``` -python check_proxy.py -``` - -## 兼容性测试 - -### 图片显示: -
      - - -
      - -### 如果一个程序能够读懂并剖析自己: - -
      - -
      - -
      - -
      - -### 其他任意Python/Cpp项目剖析: -
      - -
      - -
      - -
      - -### Latex论文一键阅读理解与摘要生成 -
      - -
      - -### 自动报告生成 -
      - - - -
      - -### 模块化功能设计 -
      - - -
      - -## Todo: - -- (Top Priority) 调用另一个开源项目text-generation-webui的web接口,使用其他llm模型 -- 总结大工程源代码时,文本过长、token溢出的问题(目前的方法是直接二分丢弃处理溢出,过于粗暴,有效信息大量丢失) -- UI不够美观 - diff --git a/spaces/dexrm/Weewee/README.md b/spaces/dexrm/Weewee/README.md deleted file mode 100644 index f2233389d9aed5fac3aaca6eec0576c8e6f30f87..0000000000000000000000000000000000000000 --- a/spaces/dexrm/Weewee/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Weewee -emoji: 📚 -colorFrom: red -colorTo: yellow -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/diacanFperku/AutoGPT/Amd A68m Chipset Driver Windows 7.md b/spaces/diacanFperku/AutoGPT/Amd A68m Chipset Driver Windows 7.md deleted file mode 100644 index d76d9b685ebcd6179819d39c37a860589b318c37..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Amd A68m Chipset Driver Windows 7.md +++ /dev/null @@ -1,9 +0,0 @@ - -

Most of the popular drivers already available on the web will work fine for most parts of your PC. Some, however, only work with specific components or versions of Windows, so you will want to download the driver that matches your hardware. Several sites sell individually tailored drivers, but as with most drivers, it is always best to download directly from the manufacturer.

      -

      Amd a68m chipset driver windows 7


      DOWNLOADhttps://gohhs.com/2uFUc4



      -

The following table shows all of the motherboards listed above that are supported by drivers available on the Internet. When you find your motherboard's name in this table, you can safely download the corresponding drivers.

      -

In most cases, the simple process of installing or upgrading the AMD Ryzen chipset drivers can be completed without issues. However, if there have been recent changes to your system hardware configuration, or the currently installed chipset drivers are extremely outdated, it is recommended to uninstall the existing chipset drivers before installing the new package. Performing the uninstall ensures a clean system and can help reduce issues or conflicts that may occur with the new installation.

      -

      Key Features: - Supports AMD Ryzen 1st and 2nd Generation/ Ryzen with Radeon... Game experience: TURBO M.2 with M.2 Shield, StoreMI, AMD Turbo USB 3.1 GEN2 - Audio Boost 4... MULTI-GPU: With STEEL ARMOR PCI-E slots. Supports 2-Way AMD Crossfire - BIOS FLASHBACK+: Simply use a USB... package contains the files needed for installing the Chipset driver. If it has been installed, updating (overwrite-installing)...

      -

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Crack No Cable Rocksmith 2014 Torrent.md b/spaces/diacanFperku/AutoGPT/Crack No Cable Rocksmith 2014 Torrent.md deleted file mode 100644 index 7c003437d916254963556c93ce44e2a30e5c9df2..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Crack No Cable Rocksmith 2014 Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

      crack no cable rocksmith 2014 torrent


      Download Ziphttps://gohhs.com/2uFU6l



      -
      - 3cee63e6c2
      -
      -
      -

      diff --git a/spaces/diacanFperku/AutoGPT/FS2004 Active Camera 2.0 With Cracked DLL For FS9.1 Tournament Cheats REPACK.md b/spaces/diacanFperku/AutoGPT/FS2004 Active Camera 2.0 With Cracked DLL For FS9.1 Tournament Cheats REPACK.md deleted file mode 100644 index 3a40773df733312c5546d7b69c332760753b7589..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/FS2004 Active Camera 2.0 With Cracked DLL For FS9.1 Tournament Cheats REPACK.md +++ /dev/null @@ -1,6 +0,0 @@ - -

      manga download full free for windows 10 cracker[/url]
      [url= zindaggi rocks movie mp4 hindi [url= keygen cs6 illustrator mac[/url] taiseertaids [url= melsatterve [url= sesspaphpag [url= keygen autocad electrical 2018 crack[/url] touchgrind skate 2 1.48 apk mod data for android [url= briletypeabumunult [url= quiz-academy-la-piramide-del-sab [url= [url= comics in english free download of chacha chaudhary pdf [url=
      walpzoffoopyiptyday [url= zindaggi rocks movie mp4 hindi [url= keygen cs6 illustrator mac[/url] taiseertaids [url= melsatterve [url= sesspaphpag [url= keygen autocad electrical 2018 crack[/url] touchgrind skate 2 1.48 apk mod data for android [url= briletypeabumunult [url= quiz-academy-la-piramide-del-sab [url= [url= comics in english free download of chacha chaudhary pdf [url=
      [url=
      [url= zindaggi rocks movie mp4 hindi [url= keygen cs6 illustrator mac[/url] taiseertaids [url= melsatterve [url= sesspaphpag [url= keygen autocad electrical 2018 crack[/url] touchgrind skate 2 1.48 apk mod data for android [url= briletypeabumunult [url= quiz-academy-la-piramide-del-sab [url= [url= cs6 illustrator mac[/url] taiseertaids [url= melsatterve [url= sesspaphpag [url= keygen autocad electrical 2018 crack[/url] touchgrind skate 2 1.

      -

      your house prices at a time of your pandemic in the united states have appeared to drop by an average of around $1,000-$2, 000. a faucet dimension sized by idaho is put on in the beginning of the pandemic and also the condition portion is reduced to 7.2%, the smallest reported at this time.6% represents one of the worst situations in 10 years, immediately after indicating a 42.3% decrease having its starting point the 1918 influenza pandemic. boston is deemed to be the worst metropolis below in which there are numerous region locations which have already been quarantined.

      -

      FS2004 Active Camera 2.0 With Cracked DLL For FS9.1 Tournament Cheats


      Download File ✪✪✪ https://gohhs.com/2uFTUa



      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Face2face Elementary Workbook Pdf Free.md b/spaces/diacanFperku/AutoGPT/Face2face Elementary Workbook Pdf Free.md deleted file mode 100644 index 11550dbc7d09298cf75698c2eb9d574283cd5df5..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Face2face Elementary Workbook Pdf Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Face2face Elementary Workbook Pdf Free


      DOWNLOADhttps://gohhs.com/2uFTYs



- -Students Book 1,200 Pages. Chris Redston Face2face Elementary Students Book.pdf 4fefd39f24
      -
      -
      -

      diff --git a/spaces/diacanFperku/AutoGPT/Loaris Trojan Remover 3.0.76.211 Patch.md b/spaces/diacanFperku/AutoGPT/Loaris Trojan Remover 3.0.76.211 Patch.md deleted file mode 100644 index d6a9bce94f5b7718ed64099d3f6d354f8efcd0c3..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Loaris Trojan Remover 3.0.76.211 Patch.md +++ /dev/null @@ -1,141 +0,0 @@ -
      -

      Loaris Trojan Remover 3.0.76.211 Patch: A Comprehensive Review

      -

      If you are looking for a reliable and effective tool to remove trojans, malware, adware, and other unwanted software from your computer, you might want to consider Loaris Trojan Remover 3.0.76.211 Patch. This is a powerful program that can scan your system, detect and eliminate all kinds of threats, and optimize your PC performance.

      -

      Loaris Trojan Remover 3.0.76.211 Patch


      DOWNLOAD ✫✫✫ https://gohhs.com/2uFUjv



      -

      In this article, we will review the main features, benefits, and drawbacks of Loaris Trojan Remover 3.0.76.211 Patch, and help you decide if it is worth downloading and installing.

      - -

      What is Loaris Trojan Remover 3.0.76.211 Patch?

      -

      Loaris Trojan Remover 3.0.76.211 Patch is a software that can help you get rid of various types of threats that often try to penetrate your system when working with the Internet. It can prevent trojans, spyware, hacking programs, and other unwanted software from invading your PC.

      -

      The program works on a simple principle: it checks all system files, scans the registry and applications, and only those that are activated when Windows starts. It can also scan your hard disk and reveal all vulnerabilities and hidden programs in your system.

      -

      The program's actions are aimed at blocking and removing threats. After starting, you can select the scan mode, after which all threats will be found and deleted.

      -

      - -

      What are the main features of Loaris Trojan Remover 3.0.76.211 Patch?

      -

      Loaris Trojan Remover 3.0.76.211 Patch has many features that make it a powerful and versatile tool for malware removal and PC optimization.

      -
        -
      • It can detect and remove all kinds of threats, including trojans, worms, rootkits, ransomware, keyloggers, adware, spyware, etc.
      • -
      • It can block advertising banners and any other type of aggressive advertising that can interfere with your browsing experience.
      • -
      • It can close access to potentially dangerous files that can harm your system or compromise your privacy.
      • -
      • It can use new technologies that will not allow the threat to harm the operating system, as they will be destroyed before they can penetrate.
      • -
      • It can delete files that do not want to be deleted by simple methods, such as stubborn viruses that are deeply embedded in the system.
      • -
      • It can navigate in the network and remain invisible, which will protect you from hacker attacks.
      • -
      • It can conduct a full analysis of all emerging threats and provide you with detailed reports.
      • -
      • It can clean and optimize your PC by removing unnecessary files, fixing registry errors, defragmenting disks, etc.
      • -
      • It has a user-friendly interface with a large number of settings that allow you to customize the program according to your needs.
      • -
      • It has a multilingual support that includes English and Russian languages.
      • -
      - -

      What are the benefits of Loaris Trojan Remover 3.0.76.211 Patch?

      -

      Loaris Trojan Remover 3.0.76.211 Patch has many benefits that make it a worthwhile choice for anyone who wants to keep their PC safe and fast.

      -
        -
      • It can provide you with comprehensive protection against various types of threats that can damage your data or steal your identity.
      • -
      • It can improve your browsing experience by blocking annoying ads and pop-ups that can slow down your internet connection or redirect you to malicious sites.
      • -
      • It can enhance your PC performance by removing junk files, fixing errors, and optimizing settings that can speed up your system.
      • -
      • It can save you time and money by preventing you from buying a new PC or paying for expensive repairs or antivirus subscriptions.
      • -
      • It can give you peace of mind by ensuring that your PC is clean and secure at all times.
      • -
      - -

      What are the drawbacks of Loaris Trojan Remover 3.0.76.211 Patch?

      -

      Loaris Trojan Remover 3.0.76.211 Patch is not a perfect software and it has some drawbacks that you should be aware of before downloading and installing it.

      -
        -
      • It is not a full-fledged antivirus and it cannot replace your existing antivirus software. It is designed to complement your antivirus by removing the threats that it might miss or cannot handle.
      • -
      • It is not free and it requires a license key to activate its full functionality. You can download a trial version that will allow you to scan your PC but not remove any threats.
      • -
      • It might cause some false positives or conflicts with some legitimate programs or files that it might mistake for threats.
      • -
      • It might require some technical knowledge or skills to use some of its advanced features or settings.
      • -
      - -

      How to download and install Loaris Trojan Remover 3.0.76.211 Patch?

      -

      If you want to download and install Loaris Trojan Remover 3.0.76.211 Patch on your PC, you need to follow these steps:

      -
        -
      1. Go to the official website of Loaris Trojan Remover (https://loaris.app/) and click on the "Download" button.
      2. -
      3. Select the version that suits your operating system (Windows XP/Vista/7/8/10) and click on the "Download Now" button.
      4. -
      5. Save the file (loarist_setup.exe) on your computer and run it as an administrator.
      6. -
      7. Follow the instructions on the screen to complete the installation process.
      8. -
      9. To activate the full functionality of the program, you need to purchase a license key from the official website or from other authorized sources.
      10. -
      11. Enter the license key in the program's interface and click on the "Activate" button.
      12. -
      - -

      Conclusion

      -

      Loaris Trojan Remover 3.0.76.211 Patch is a powerful and effective tool that can help you remove trojans, malware, adware, and other unwanted software from your computer and optimize its performance.

      -

      The program has many features that make it versatile and user-friendly.

      -

      The program has many benefits that make it worthwhile for anyone who wants to keep their PC safe and fast.

      -

      The program has some drawbacks that you should be aware of before downloading and installing it.

      -

      The program is not free and it requires a license key to activate its full functionality.

      -

      If you want to download and install Loaris Trojan Remover 3.0.76.211 Patch on your PC, you need to follow some simple steps.

      -

      If you are looking for a reliable and effective tool to remove trojans, malware, adware, and other unwanted software from your computer,download Loaris Trojan Remover 3

      -

      How to use Loaris Trojan Remover 3.0.76.211 Patch?

      -

      Using Loaris Trojan Remover 3.0.76.211 Patch is very easy and straightforward. You just need to follow these steps:

      -
        -
      1. Launch the program and click on the "Scan" button.
      2. -
      3. Choose the scan mode that suits your needs: standard, full, custom, or removable.
      4. -
      5. Wait for the scan to finish and review the results.
      6. -
      7. Select the items that you want to remove and click on the "Apply" button.
      8. -
      9. Restart your computer if necessary.
      10. -
      -

      You can also access other features of the program from the main interface, such as:

      -
        -
      • The "Tools" tab, where you can manage your startup items, browser settings, host files, Windows updates, etc.
      • -
      • The "Settings" tab, where you can adjust the program's options, such as scan parameters, update frequency, language, etc.
      • -
      • The "Log files" tab, where you can view the history of your scans and actions.
      • -
      • The "Help" tab, where you can get support from the developers or contact them for feedback.
      • -
      - -

      What are the alternatives to Loaris Trojan Remover 3.0.76.211 Patch?

      -

      Loaris Trojan Remover 3.0.76.211 Patch is not the only software that can help you remove trojans, malware, adware, and other unwanted software from your computer. There are some alternatives that you can try if you are not satisfied with Loaris or if you want to compare different options.

      -

      Some of the most popular alternatives are:

      -
        -
      • Malwarebytes: This is a well-known and trusted software that can detect and remove various types of malware, including trojans, ransomware, spyware, adware, etc. It also offers real-time protection, web protection, exploit protection, etc.
      • -
      • HitmanPro: This is a lightweight and fast software that can scan your computer for malware and remove it in minutes. It uses cloud-based technology and behavioral analysis to detect and eliminate threats that other programs might miss.
      • -
      • Zemana AntiMalware: This is a simple and effective software that can scan your computer for malware and remove it with one click. It also offers real-time protection, ransomware protection, browser cleanup, etc.
      • -
      - -

      Is Loaris Trojan Remover 3.0.76.211 Patch worth it?

      -

      Loaris Trojan Remover 3.0.76.211 Patch is a powerful and effective tool that can help you remove trojans, malware, adware, and other unwanted software from your computer and optimize its performance.

      -

      The program has many features that make it versatile and user-friendly.

      -

      The program has many benefits that make it worthwhile for anyone who wants to keep their PC safe and fast.

      -

      The program has some drawbacks that you should be aware of before downloading and installing it.

      -

      The program is not free and it requires a license key to activate its full functionality.

      -

      If you are looking for a reliable and effective tool to remove trojans, malware, adware, and other unwanted software from your computer,download Loaris Trojan Remover 3 -

      What are the customer reviews of Loaris Trojan Remover 3.0.76.211 Patch?

      -

      Loaris Trojan Remover 3.0.76.211 Patch has received many positive reviews from customers who have used it to remove trojans, malware, adware, and other unwanted software from their computers.

      -

      Some of the most common praises are:

      -
        -
      • It is easy to use and has a clear and intuitive interface.
      • -
      • It is fast and efficient and can scan and clean the system in a short time.
      • -
      • It is thorough and accurate and can detect and eliminate all kinds of threats that other programs might miss or cannot handle.
      • -
      • It is versatile and customizable and can offer different scan modes and settings to suit different needs.
      • -
      • It is reliable and trustworthy and can provide comprehensive protection and optimization for the PC.
      • -
      -

      Some of the most common complaints are:

      -
        -
      • It is not free and it requires a license key to activate its full functionality.
      • -
      • It might cause some false positives or conflicts with some legitimate programs or files that it might mistake for threats.
      • -
      • It might require some technical knowledge or skills to use some of its advanced features or settings.
      • -
      • It does not have a live chat or phone support option and only offers email support.
      • -
      - -

      How to uninstall Loaris Trojan Remover 3.0.76.211 Patch?

      -

      If you want to uninstall Loaris Trojan Remover 3.0.76.211 Patch from your PC, you need to follow these steps:

      -
        -
      1. Close the program if it is running.
      2. -
      3. Go to the Control Panel and click on "Programs and Features".
      4. -
      5. Find Loaris Trojan Remover 3.0.76.211 Patch in the list of installed programs and click on "Uninstall".
      6. -
      7. Follow the instructions on the screen to complete the uninstallation process.
      8. -
      9. Restart your computer if necessary.
      10. -
      - -

      Conclusion

      -

      In conclusion, Loaris Trojan Remover 3.0.76.211 Patch is a powerful and effective tool that can help you remove trojans, malware, adware, and other unwanted software from your computer and optimize its performance.

      -

      The program has many features that make it versatile and user-friendly.

      -

      The program has many benefits that make it worthwhile for anyone who wants to keep their PC safe and fast.

      -

      The program has some drawbacks that you should be aware of before downloading and installing it.

      -

      The program is not free and it requires a license key to activate its full functionality.

      -

      If you are looking for a reliable and effective tool to remove trojans, malware, adware, and other unwanted software from your computer,download Loaris Trojan Remover 3 -

      Conclusion

      -

      In conclusion, Loaris Trojan Remover 3.0.76.211 Patch is a powerful and effective tool that can help you remove trojans, malware, adware, and other unwanted software from your computer and optimize its performance.

      -

      The program has many features that make it versatile and user-friendly. It can scan your system, detect and eliminate all kinds of threats, block annoying ads, close access to dangerous files, delete stubborn viruses, navigate in the network and remain invisible, conduct a full analysis of all emerging threats, and provide you with detailed reports.

      -

      The program has many benefits that make it worthwhile for anyone who wants to keep their PC safe and fast. It can provide you with comprehensive protection against various types of threats that can damage your data or steal your identity. It can improve your browsing experience by blocking annoying ads and pop-ups that can slow down your internet connection or redirect you to malicious sites. It can enhance your PC performance by removing junk files, fixing errors, and optimizing settings that can speed up your system. It can save you time and money by preventing you from buying a new PC or paying for expensive repairs or antivirus subscriptions. It can give you peace of mind by ensuring that your PC is clean and secure at all times.

      -

      The program has some drawbacks that you should be aware of before downloading and installing it. It is not a full-fledged antivirus and it cannot replace your existing antivirus software. It is not free and it requires a license key to activate its full functionality. It might cause some false positives or conflicts with some legitimate programs or files that it might mistake for threats. It might require some technical knowledge or skills to use some of its advanced features or settings. It does not have a live chat or phone support option and only offers email support.

      -

      If you are looking for a reliable and effective tool to remove trojans, malware, adware, and other unwanted software from your computer,download Loaris Trojan Remover 3

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Microsoft Train Simulator Free Download !!TOP!!.rar.md b/spaces/diacanFperku/AutoGPT/Microsoft Train Simulator Free Download !!TOP!!.rar.md deleted file mode 100644 index c463186f64f91919a23bb6a9457d8b6090fbfe7f..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Microsoft Train Simulator Free Download !!TOP!!.rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

      microsoft train simulator free download.rar


      DOWNLOADhttps://gohhs.com/2uFVMV



      - -Download Microsoft Train Simulator 2001 pc game full version setup file in single, direct link for windows. This is the best Train simulator, a very good game. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/dineshreddy/WALT/mmdet/models/necks/nasfcos_fpn.py b/spaces/dineshreddy/WALT/mmdet/models/necks/nasfcos_fpn.py deleted file mode 100644 index 2daf79ef591373499184c624ccd27fb7456dec06..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/necks/nasfcos_fpn.py +++ /dev/null @@ -1,161 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, caffe2_xavier_init -from mmcv.ops.merge_cells import ConcatCell - -from ..builder import NECKS - - -@NECKS.register_module() -class NASFCOS_FPN(nn.Module): - """FPN structure in NASFPN. - - Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for - Object Detection `_ - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool): It decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, its actual mode is specified by `extra_convs_on_inputs`. - conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=1, - end_level=-1, - add_extra_convs=False, - conv_cfg=None, - norm_cfg=None): - super(NASFCOS_FPN, self).__init__() - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.norm_cfg = norm_cfg - self.conv_cfg = conv_cfg - - if end_level == -1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - self.backbone_end_level = end_level - assert end_level <= len(in_channels) - assert num_outs == end_level - start_level - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - - self.adapt_convs = nn.ModuleList() - for i in range(self.start_level, self.backbone_end_level): - adapt_conv = ConvModule( - in_channels[i], - out_channels, - 1, - stride=1, - padding=0, - bias=False, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU', inplace=False)) - self.adapt_convs.append(adapt_conv) - - # C2 is omitted according to the paper - extra_levels = num_outs - self.backbone_end_level + self.start_level - - def build_concat_cell(with_input1_conv, with_input2_conv): - cell_conv_cfg = dict( - kernel_size=1, padding=0, bias=False, groups=out_channels) - return ConcatCell( - in_channels=out_channels, - out_channels=out_channels, - with_out_conv=True, - out_conv_cfg=cell_conv_cfg, - out_norm_cfg=dict(type='BN'), - out_conv_order=('norm', 'act', 'conv'), - with_input1_conv=with_input1_conv, - with_input2_conv=with_input2_conv, - input_conv_cfg=conv_cfg, - input_norm_cfg=norm_cfg, - upsample_mode='nearest') - - # Denote c3=f0, c4=f1, c5=f2 for convince - self.fpn = nn.ModuleDict() - self.fpn['c22_1'] = build_concat_cell(True, True) - self.fpn['c22_2'] = build_concat_cell(True, True) - self.fpn['c32'] = build_concat_cell(True, False) - self.fpn['c02'] = build_concat_cell(True, False) - self.fpn['c42'] = build_concat_cell(True, True) - self.fpn['c36'] = 
build_concat_cell(True, True) - self.fpn['c61'] = build_concat_cell(True, True) # f9 - self.extra_downsamples = nn.ModuleList() - for i in range(extra_levels): - extra_act_cfg = None if i == 0 \ - else dict(type='ReLU', inplace=False) - self.extra_downsamples.append( - ConvModule( - out_channels, - out_channels, - 3, - stride=2, - padding=1, - act_cfg=extra_act_cfg, - order=('act', 'norm', 'conv'))) - - def forward(self, inputs): - """Forward function.""" - feats = [ - adapt_conv(inputs[i + self.start_level]) - for i, adapt_conv in enumerate(self.adapt_convs) - ] - - for (i, module_name) in enumerate(self.fpn): - idx_1, idx_2 = int(module_name[1]), int(module_name[2]) - res = self.fpn[module_name](feats[idx_1], feats[idx_2]) - feats.append(res) - - ret = [] - for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5 - feats1, feats2 = feats[idx], feats[5] - feats2_resize = F.interpolate( - feats2, - size=feats1.size()[2:], - mode='bilinear', - align_corners=False) - - feats_sum = feats1 + feats2_resize - ret.append( - F.interpolate( - feats_sum, - size=inputs[input_idx].size()[2:], - mode='bilinear', - align_corners=False)) - - for submodule in self.extra_downsamples: - ret.append(submodule(ret[-1])) - - return tuple(ret) - - def init_weights(self): - """Initialize the weights of module.""" - for module in self.fpn.values(): - if hasattr(module, 'conv_out'): - caffe2_xavier_init(module.out_conv.conv) - - for modules in [ - self.adapt_convs.modules(), - self.extra_downsamples.modules() - ]: - for module in modules: - if isinstance(module, nn.Conv2d): - caffe2_xavier_init(module) diff --git a/spaces/dirge/voicevox/voicevox_engine/engine_manifest/EngineManifest.py b/spaces/dirge/voicevox/voicevox_engine/engine_manifest/EngineManifest.py deleted file mode 100644 index 44a9329b40658999fda3f369887ab4455d86372d..0000000000000000000000000000000000000000 --- a/spaces/dirge/voicevox/voicevox_engine/engine_manifest/EngineManifest.py +++ /dev/null @@ -1,58 +0,0 @@ -from typing import List, Optional - -from pydantic import BaseModel, Field - - -class UpdateInfo(BaseModel): - """ - エンジンのアップデート情報 - """ - - version: str = Field(title="エンジンのバージョン名") - descriptions: List[str] = Field(title="アップデートの詳細についての説明") - contributors: Optional[List[str]] = Field(title="貢献者名") - - -class LicenseInfo(BaseModel): - """ - 依存ライブラリのライセンス情報 - """ - - name: str = Field(title="依存ライブラリ名") - version: Optional[str] = Field(title="依存ライブラリのバージョン") - license: Optional[str] = Field(title="依存ライブラリのライセンス名") - text: str = Field(title="依存ライブラリのライセンス本文") - - -class SupportedFeatures(BaseModel): - """ - エンジンが持つ機能の一覧 - """ - - adjust_mora_pitch: bool = Field(title="モーラごとの音高の調整") - adjust_phoneme_length: bool = Field(title="音素ごとの長さの調整") - adjust_speed_scale: bool = Field(title="全体の話速の調整") - adjust_pitch_scale: bool = Field(title="全体の音高の調整") - adjust_intonation_scale: bool = Field(title="全体の抑揚の調整") - adjust_volume_scale: bool = Field(title="全体の音量の調整") - interrogative_upspeak: bool = Field(title="疑問文の自動調整") - synthesis_morphing: bool = Field(title="2人の話者でモーフィングした音声を合成") - manage_library: bool = Field(title="音声ライブラリのインストール・アンインストール") - - -class EngineManifest(BaseModel): - """ - エンジン自体に関する情報 - """ - - manifest_version: str = Field(title="マニフェストのバージョン") - name: str = Field(title="エンジン名") - brand_name: str = Field(title="ブランド名") - uuid: str = Field(title="エンジンのUUID") - url: str = Field(title="エンジンのURL") - icon: str = Field(title="エンジンのアイコンをBASE64エンコードしたもの") - default_sampling_rate: int = Field(title="デフォルトのサンプリング周波数") - 
terms_of_service: str = Field(title="エンジンの利用規約") - update_infos: List[UpdateInfo] = Field(title="エンジンのアップデート情報") - dependency_licenses: List[LicenseInfo] = Field(title="依存関係のライセンス情報") - supported_features: SupportedFeatures = Field(title="エンジンが持つ機能") diff --git a/spaces/domenicrosati/scite-qa-demo/app.py b/spaces/domenicrosati/scite-qa-demo/app.py deleted file mode 100644 index 4932ef8e3ae313ae8c6a65ce30cb4141998fc6da..0000000000000000000000000000000000000000 --- a/spaces/domenicrosati/scite-qa-demo/app.py +++ /dev/null @@ -1,411 +0,0 @@ -import streamlit as st -from transformers import pipeline, AutoTokenizer, LEDForConditionalGeneration -import requests -from bs4 import BeautifulSoup -import nltk -import string -from streamlit.components.v1 import html -from sentence_transformers.cross_encoder import CrossEncoder as CE -import re -from typing import List, Tuple -import torch - -SCITE_API_KEY = st.secrets["SCITE_API_KEY"] - -# class CrossEncoder: -# def __init__(self, model_path: str, **kwargs): -# self.model = CE(model_path, **kwargs) - -# def predict(self, sentences: List[Tuple[str,str]], batch_size: int = 32, show_progress_bar: bool = True) -> List[float]: -# return self.model.predict( -# sentences=sentences, -# batch_size=batch_size, -# show_progress_bar=show_progress_bar) - - -def remove_html(x): - soup = BeautifulSoup(x, 'html.parser') - text = soup.get_text() - return text.strip() - - -# 4 searches: strict y/n, supported y/n -# deduplicate -# search per query - -# options are abstract search -# all search - - -def search(term, limit=10, clean=True, strict=True, all_mode=True, abstracts=True, abstract_only=False): - term = clean_query(term, clean=clean, strict=strict) - # heuristic, 2 searches strict and not? and then merge? - # https://api.scite.ai/search?mode=all&term=unit%20testing%20software&limit=10&date_from=2000&date_to=2022&offset=0&supporting_from=1&contrasting_from=0&contrasting_to=0&user_slug=domenic-rosati-keW5&compute_aggregations=true - contexts, docs = [], [] - if not abstract_only: - mode = 'all' - if not all_mode: - mode = 'citations' - search = f"https://api.scite.ai/search?mode={mode}&term={term}&limit={limit}&offset=0&user_slug=domenic-rosati-keW5&compute_aggregations=false" - req = requests.get( - search, - headers={ - 'Authorization': f'Bearer {SCITE_API_KEY}' - } - ) - try: - req.json() - except: - pass - - contexts += [remove_html('\n'.join([cite['snippet'] for cite in doc['citations'] if cite['lang'] == 'en'])) for doc in req.json()['hits']] - docs += [(doc['doi'], doc['citations'], doc['title'], doc['abstract'] or '') - for doc in req.json()['hits']] - - if abstracts or abstract_only: - search = f"https://api.scite.ai/search?mode=papers&abstract={term}&limit={limit}&offset=0&user_slug=domenic-rosati-keW5&compute_aggregations=false" - req = requests.get( - search, - headers={ - 'Authorization': f'Bearer {SCITE_API_KEY}' - } - ) - try: - req.json() - contexts += [remove_html(doc['abstract'] or '') for doc in req.json()['hits']] - docs += [(doc['doi'], doc['citations'], doc['title'], doc['abstract'] or '') - for doc in req.json()['hits']] - except: - pass - - return ( - contexts, - docs - ) - - -def find_source(text, docs, matched): - for doc in docs: - for snippet in doc[1]: - if text in remove_html(snippet.get('snippet', '')): - if matched and remove_html(snippet.get('snippet', '')).strip() != matched.strip(): - continue - new_text = text - for sent in nltk.sent_tokenize(remove_html(snippet.get('snippet', ''))): - if text in sent: - new_text = sent - return 
{ - 'citation_statement': snippet['snippet'].replace('', '').replace('', ''), - 'text': new_text, - 'from': snippet['source'], - 'supporting': snippet['target'], - 'source_title': remove_html(doc[2] or ''), - 'source_link': f"https://scite.ai/reports/{doc[0]}" - } - if text in remove_html(doc[3]): - if matched and remove_html(doc[3]).strip() != matched.strip(): - continue - new_text = text - sent_loc = None - sents = nltk.sent_tokenize(remove_html(doc[3])) - for i, sent in enumerate(sents): - if text in sent: - new_text = sent - sent_loc = i - - context = remove_html(doc[3]).replace('', '').replace('', '') - if sent_loc: - context_len = 3 - sent_beg = sent_loc - context_len - if sent_beg <= 0: sent_beg = 0 - sent_end = sent_loc + context_len - if sent_end >= len(sents): - sent_end = len(sents) - context = ''.join(sents[sent_beg:sent_end]) - - return { - 'citation_statement': context, - 'text': new_text, - 'from': doc[0], - 'supporting': doc[0], - 'source_title': remove_html(doc[2] or ''), - 'source_link': f"https://scite.ai/reports/{doc[0]}" - } - return None - - -# @st.experimental_singleton -# def init_models(): -# nltk.download('stopwords') -# nltk.download('punkt') -# from nltk.corpus import stopwords -# stop = set(stopwords.words('english') + list(string.punctuation)) -# device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -# question_answerer = pipeline( -# "question-answering", model='nlpconnect/roberta-base-squad2-nq', -# device=0 if torch.cuda.is_available() else -1, handle_impossible_answer=False, -# ) -# reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', device=device) -# # queryexp_tokenizer = AutoTokenizer.from_pretrained("doc2query/all-with_prefix-t5-base-v1") -# # queryexp_model = AutoModelWithLMHead.from_pretrained("doc2query/all-with_prefix-t5-base-v1") -# return question_answerer, reranker, stop, device - -# qa_model, reranker, stop, device = init_models() # queryexp_model, queryexp_tokenizer - - -def clean_query(query, strict=True, clean=True): - operator = ' ' - if strict: - operator = ' AND ' - query = operator.join( - [i for i in query.lower().split(' ') if clean and i not in stop]) - if clean: - query = query.translate(str.maketrans('', '', string.punctuation)) - return query - - -def card(title, context, score, link, supporting): - st.markdown(f""" -
      -
      -
      -
      - - {context} - [Confidence: {score}%] - -
      - From {title} -
      -
      -
      - """, unsafe_allow_html=True) - html(f""" -
      - - """, width=None, height=42, scrolling=False) - - -st.title("Scientific Question Answering with Citations") - -st.write(""" -Ask a scientific question and get an answer drawn from [scite.ai](https://scite.ai) corpus of over 1.1bn citation statements. -Answers are linked to source documents containing citations where users can explore further evidence from scientific literature for the answer. - -For example try: Do tanning beds cause cancer? -""") - -st.markdown(""" - -""", unsafe_allow_html=True) - -# with st.expander("Settings (strictness, context limit, top hits)"): -# concat_passages = st.radio( -# "Concatenate passages as one long context?", -# ('yes', 'no')) -# present_impossible = st.radio( -# "Present impossible answers? (if the model thinks its impossible to answer should it still try?)", -# ('yes', 'no')) -# support_all = st.radio( -# "Use abstracts and titles as a ranking signal (if the words are matched in the abstract then the document is more relevant)?", -# ('no', 'yes')) -# support_abstracts = st.radio( -# "Use abstracts as a source document?", -# ('yes', 'no', 'abstract only')) -# strict_lenient_mix = st.radio( -# "Type of strict+lenient combination: Fallback or Mix? If fallback, strict is run first then if the results are less than context_lim we also search lenient. Mix will search them both and let reranking sort em out", -# ('mix', 'fallback')) -# confidence_threshold = st.slider('Confidence threshold for answering questions? This number represents how confident the model should be in the answers it gives. The number is out of 100%', 0, 100, 1) -# use_reranking = st.radio( -# "Use Reranking? Reranking will rerank the top hits using semantic similarity of document and query.", -# ('yes', 'no')) -# top_hits_limit = st.slider('Top hits? How many documents to use for reranking. Larger is slower but higher quality', 10, 300, 100) -# context_lim = st.slider('Context limit? How many documents to use for answering from. 
Larger is slower but higher quality', 10, 300, 25) - -# def paraphrase(text, max_length=128): -# input_ids = queryexp_tokenizer.encode(text, return_tensors="pt", add_special_tokens=True) -# generated_ids = queryexp_model.generate(input_ids=input_ids, num_return_sequences=suggested_queries or 5, num_beams=suggested_queries or 5, max_length=max_length) -# queries = set([queryexp_tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]) -# preds = '\n * '.join(queries) -# return preds - - -def group_results_by_context(results): - result_groups = {} - for result in results: - if result['context'] not in result_groups: - result_groups[result['context']] = result - result_groups[result['context']]['texts'] = [] - - result_groups[result['context']]['texts'].append( - result['answer'] - ) - if result['score'] > result_groups[result['context']]['score']: - result_groups[result['context']]['score'] = result['score'] - return list(result_groups.values()) - - -def matched_context(start_i, end_i, contexts_string, seperator='---'): - # find seperators to identify start and end - doc_starts = [0] - for match in re.finditer(seperator, contexts_string): - doc_starts.append(match.end()) - - for i in range(len(doc_starts)): - if i == len(doc_starts) - 1: - if start_i >= doc_starts[i]: - return contexts_string[doc_starts[i]:len(contexts_string)].replace(seperator, '') - - if start_i >= doc_starts[i] and end_i <= doc_starts[i+1]: - return contexts_string[doc_starts[i]:doc_starts[i+1]].replace(seperator, '') - return None - - -# def run_query_full(query, progress_bar): -# # if use_query_exp == 'yes': -# # query_exp = paraphrase(f"question2question: {query}") -# # st.markdown(f""" -# # If you are not getting good results try one of: -# # * {query_exp} -# # """) - -# # could also try fallback if there are no good answers by score... -# limit = top_hits_limit or 100 -# context_limit = context_lim or 10 -# contexts_strict, orig_docs_strict = search(query, limit=limit, strict=True, all_mode=support_all == 'yes', abstracts= support_abstracts == 'yes', abstract_only=support_abstracts == 'abstract only') -# if strict_lenient_mix == 'fallback' and len(contexts_strict) < context_limit: -# contexts_lenient, orig_docs_lenient = search(query, limit=limit, strict=False, all_mode=support_all == 'yes', abstracts= support_abstracts == 'yes', abstract_only= support_abstracts == 'abstract only') -# contexts = list( -# set(contexts_strict + contexts_lenient) -# ) -# orig_docs = orig_docs_strict + orig_docs_lenient -# elif strict_lenient_mix == 'mix': -# contexts_lenient, orig_docs_lenient = search(query, limit=limit, strict=False) -# contexts = list( -# set(contexts_strict + contexts_lenient) -# ) -# orig_docs = orig_docs_strict + orig_docs_lenient -# else: -# contexts = list( -# set(contexts_strict) -# ) -# orig_docs = orig_docs_strict -# progress_bar.progress(25) - -# if len(contexts) == 0 or not ''.join(contexts).strip(): -# return st.markdown(""" -#
      -#
      -#
      - # Sorry... no results for that question! Try another... - #
      - #
      - #
      - # """, unsafe_allow_html=True) - - # if use_reranking == 'yes': - # sentence_pairs = [[query, context] for context in contexts] - # scores = reranker.predict(sentence_pairs, batch_size=len(sentence_pairs), show_progress_bar=False) - # hits = {contexts[idx]: scores[idx] for idx in range(len(scores))} - # sorted_contexts = [k for k,v in sorted(hits.items(), key=lambda x: x[0], reverse=True)] - # contexts = sorted_contexts[:context_limit] - # else: - # contexts = contexts[:context_limit] - - # progress_bar.progress(50) - # if concat_passages == 'yes': - # context = '\n---'.join(contexts) - # model_results = qa_model(question=query, context=context, top_k=10, doc_stride=512 // 2, max_answer_len=128, max_seq_len=512, handle_impossible_answer=present_impossible=='yes') - # else: - # context = ['\n---\n'+ctx for ctx in contexts] - # model_results = qa_model(question=[query]*len(contexts), context=context, handle_impossible_answer=present_impossible=='yes') - - # results = [] - - # progress_bar.progress(75) - # for i, result in enumerate(model_results): - # if concat_passages == 'yes': - # matched = matched_context(result['start'], result['end'], context) - # else: - # matched = matched_context(result['start'], result['end'], context[i]) - # support = find_source(result['answer'], orig_docs, matched) - # if not support: - # continue - # results.append({ - # "answer": support['text'], - # "title": support['source_title'], - # "link": support['source_link'], - # "context": support['citation_statement'], - # "score": result['score'], - # "doi": support["supporting"] - # }) - - # grouped_results = group_results_by_context(results) - # sorted_result = sorted(grouped_results, key=lambda x: x['score'], reverse=True) - - # if confidence_threshold == 0: - # threshold = 0 - # else: - # threshold = (confidence_threshold or 10) / 100 - - # sorted_result = list(filter( - # lambda x: x['score'] > threshold, - # sorted_result - # )) - - # progress_bar.progress(100) - # for r in sorted_result: - # ctx = remove_html(r["context"]) - # for answer in r['texts']: - # ctx = ctx.replace(answer.strip(), f"{answer.strip()}") - # # .replace( ' -
      -
      - Sorry... no results for that question! Try another... -
      -
      -
      - """, unsafe_allow_html=True) - - for r in resp['results']: - ctx = remove_html(r["context"]) - for answer in r['texts']: - ctx = ctx.replace(answer.strip(), f"{answer.strip()}") - # .replace( ' b + 2 * std): - warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " - "The distribution of values may be incorrect.", - stacklevel=2) - - with torch.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - l = norm_cdf((a - mean) / std) - u = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [l, u], then translate to - # [2l-1, 2u-1]. - tensor.uniform_(2 * l - 1, 2 * u - 1) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - tensor.erfinv_() - - # Transform to proper mean, std - tensor.mul_(std * math.sqrt(2.)) - tensor.add_(mean) - - # Clamp to ensure it's in the proper range - tensor.clamp_(min=a, max=b) - return tensor - - -def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): - r"""Fills the input Tensor with values drawn from a truncated - normal distribution. The values are effectively drawn from the - normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` - with values outside :math:`[a, b]` redrawn until they are within - the bounds. The method used for generating the random values works - best when :math:`a \leq \text{mean} \leq b`. - Args: - tensor: an n-dimensional `torch.Tensor` - mean: the mean of the normal distribution - std: the standard deviation of the normal distribution - a: the minimum cutoff value - b: the maximum cutoff value - Examples: - >>> w = torch.empty(3, 5) - >>> nn.init.trunc_normal_(w) - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) diff --git a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/sanskrit.py b/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/sanskrit.py deleted file mode 100644 index 0223aaac384a2f850f5bc20651fc18eb964607d0..0000000000000000000000000000000000000000 --- a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/sanskrit.py +++ /dev/null @@ -1,62 +0,0 @@ -import re -from indic_transliteration import sanscript - - -# List of (iast, ipa) pairs: -_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('a', 'ə'), - ('ā', 'aː'), - ('ī', 'iː'), - ('ū', 'uː'), - ('ṛ', 'ɹ`'), - ('ṝ', 'ɹ`ː'), - ('ḷ', 'l`'), - ('ḹ', 'l`ː'), - ('e', 'eː'), - ('o', 'oː'), - ('k', 'k⁼'), - ('k⁼h', 'kʰ'), - ('g', 'g⁼'), - ('g⁼h', 'gʰ'), - ('ṅ', 'ŋ'), - ('c', 'ʧ⁼'), - ('ʧ⁼h', 'ʧʰ'), - ('j', 'ʥ⁼'), - ('ʥ⁼h', 'ʥʰ'), - ('ñ', 'n^'), - ('ṭ', 't`⁼'), - ('t`⁼h', 't`ʰ'), - ('ḍ', 'd`⁼'), - ('d`⁼h', 'd`ʰ'), - ('ṇ', 'n`'), - ('t', 't⁼'), - ('t⁼h', 'tʰ'), - ('d', 'd⁼'), - ('d⁼h', 'dʰ'), - ('p', 'p⁼'), - ('p⁼h', 'pʰ'), - ('b', 'b⁼'), - ('b⁼h', 'bʰ'), - ('y', 'j'), - ('ś', 'ʃ'), - ('ṣ', 's`'), - ('r', 'ɾ'), - ('l̤', 'l`'), - ('h', 'ɦ'), - ("'", ''), - ('~', '^'), - ('ṃ', '^') -]] - - -def devanagari_to_ipa(text): - text = text.replace('ॐ', 'ओम्') - text = re.sub(r'\s*।\s*$', '.', text) - text = re.sub(r'\s*।\s*', ', ', text) - text = re.sub(r'\s*॥', '.', text) - text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST) - for regex, replacement in _iast_to_ipa: - text = re.sub(regex, replacement, text) - text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0) - [:-1]+'h'+x.group(1)+'*', text) - return text diff --git a/spaces/ealbinu/automatic-speech-recognition/app.py b/spaces/ealbinu/automatic-speech-recognition/app.py 
deleted file mode 100644 index 9746c32bf5175cd7a8d5dace31ae5581a24df98b..0000000000000000000000000000000000000000 --- a/spaces/ealbinu/automatic-speech-recognition/app.py +++ /dev/null @@ -1,391 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# References: -# https://gradio.app/docs/#dropdown - -import base64 -import logging -import os -import tempfile -import time -from datetime import datetime - -import gradio as gr -import torch -import torchaudio -import urllib.request - - -from examples import examples -from model import decode, get_pretrained_model, language_to_models, sample_rate - -languages = list(language_to_models.keys()) - - -def convert_to_wav(in_filename: str) -> str: - """Convert the input audio file to a wave file""" - out_filename = in_filename + ".wav" - logging.info(f"Converting '{in_filename}' to '{out_filename}'") - _ = os.system(f"ffmpeg -hide_banner -i '{in_filename}' -ar 16000 '{out_filename}'") - _ = os.system( - f"ffmpeg -hide_banner -loglevel error -i '{in_filename}' -ar 16000 '{out_filename}.flac'" - ) - - with open(out_filename + ".flac", "rb") as f: - s = "\n" + out_filename + "\n" - s += base64.b64encode(f.read()).decode() - logging.info(s) - - return out_filename - - -def build_html_output(s: str, style: str = "result_item_success"): - return f""" -
      -
      - {s} -
      -
      - """ - -def process_url( - language: str, - repo_id: str, - decoding_method: str, - num_active_paths: int, - url: str, -): - logging.info(f"Processing URL: {url}") - with tempfile.NamedTemporaryFile() as f: - try: - urllib.request.urlretrieve(url, f.name) - - return process( - in_filename=f.name, - language=language, - repo_id=repo_id, - decoding_method=decoding_method, - num_active_paths=num_active_paths, - ) - except Exception as e: - logging.info(str(e)) - return "", build_html_output(str(e), "result_item_error") - -def process_uploaded_file( - language: str, - repo_id: str, - decoding_method: str, - num_active_paths: int, - in_filename: str, -): - if in_filename is None or in_filename == "": - return "", build_html_output( - "Please first upload a file and then click " - 'the button "submit for recognition"', - "result_item_error", - ) - - logging.info(f"Processing uploaded file: {in_filename}") - try: - return process( - in_filename=in_filename, - language=language, - repo_id=repo_id, - decoding_method=decoding_method, - num_active_paths=num_active_paths, - ) - except Exception as e: - logging.info(str(e)) - return "", build_html_output(str(e), "result_item_error") - - -def process_microphone( - language: str, - repo_id: str, - decoding_method: str, - num_active_paths: int, - in_filename: str, -): - if in_filename is None or in_filename == "": - return "", build_html_output( - "Please first click 'Record from microphone', speak, " - "click 'Stop recording', and then " - "click the button 'submit for recognition'", - "result_item_error", - ) - - logging.info(f"Processing microphone: {in_filename}") - try: - return process( - in_filename=in_filename, - language=language, - repo_id=repo_id, - decoding_method=decoding_method, - num_active_paths=num_active_paths, - ) - except Exception as e: - logging.info(str(e)) - return "", build_html_output(str(e), "result_item_error") - - -@torch.no_grad() -def process( - language: str, - repo_id: str, - decoding_method: str, - num_active_paths: int, - in_filename: str, -): - logging.info(f"language: {language}") - logging.info(f"repo_id: {repo_id}") - logging.info(f"decoding_method: {decoding_method}") - logging.info(f"num_active_paths: {num_active_paths}") - logging.info(f"in_filename: {in_filename}") - - filename = convert_to_wav(in_filename) - - now = datetime.now() - date_time = now.strftime("%Y-%m-%d %H:%M:%S.%f") - logging.info(f"Started at {date_time}") - - start = time.time() - - recognizer = get_pretrained_model( - repo_id, - decoding_method=decoding_method, - num_active_paths=num_active_paths, - ) - - text = decode(recognizer, filename) - - date_time = now.strftime("%Y-%m-%d %H:%M:%S.%f") - end = time.time() - - metadata = torchaudio.info(filename) - duration = metadata.num_frames / sample_rate - rtf = (end - start) / duration - - logging.info(f"Finished at {date_time} s. Elapsed: {end - start: .3f} s") - - info = f""" - Wave duration : {duration: .3f} s
      - Processing time: {end - start: .3f} s
      - RTF: {end - start: .3f}/{duration: .3f} = {rtf:.3f}
      - """ - if rtf > 1: - info += ( - "
      We are loading the model for the first run. " - "Please run again to measure the real RTF.
      " - ) - - logging.info(info) - logging.info(f"\nrepo_id: {repo_id}\nhyp: {text}") - - return text, build_html_output(info) - - -title = "# Automatic Speech Recognition with Next-gen Kaldi" -description = """ -This space shows how to do automatic speech recognition with Next-gen Kaldi. - -Please visit - -for streaming speech recognition with **Next-gen Kaldi**. - -It is running on CPU within a docker container provided by Hugging Face. - -See more information by visiting the following links: - -- -- -- -- - -If you want to deploy it locally, please see - -""" - -# css style is copied from -# https://huggingface.co/spaces/alphacep/asr/blob/main/app.py#L113 -css = """ -.result {display:flex;flex-direction:column} -.result_item {padding:15px;margin-bottom:8px;border-radius:15px;width:100%} -.result_item_success {background-color:mediumaquamarine;color:white;align-self:start} -.result_item_error {background-color:#ff7070;color:white;align-self:start} -""" - - -def update_model_dropdown(language: str): - if language in language_to_models: - choices = language_to_models[language] - return gr.Dropdown.update(choices=choices, value=choices[0]) - - raise ValueError(f"Unsupported language: {language}") - - -demo = gr.Blocks(css=css) - - -with demo: - gr.Markdown(title) - language_choices = list(language_to_models.keys()) - - language_radio = gr.Radio( - label="Language", - choices=language_choices, - value=language_choices[0], - ) - model_dropdown = gr.Dropdown( - choices=language_to_models[language_choices[0]], - label="Select a model", - value=language_to_models[language_choices[0]][0], - ) - - language_radio.change( - update_model_dropdown, - inputs=language_radio, - outputs=model_dropdown, - ) - - decoding_method_radio = gr.Radio( - label="Decoding method", - choices=["greedy_search", "modified_beam_search"], - value="greedy_search", - ) - - num_active_paths_slider = gr.Slider( - minimum=1, - value=4, - step=1, - label="Number of active paths for modified_beam_search", - ) - - with gr.Tabs(): - with gr.TabItem("Upload from disk"): - uploaded_file = gr.Audio( - source="upload", # Choose between "microphone", "upload" - type="filepath", - optional=False, - label="Upload from disk", - ) - upload_button = gr.Button("Submit for recognition") - uploaded_output = gr.Textbox(label="Recognized speech from uploaded file") - uploaded_html_info = gr.HTML(label="Info") - - gr.Examples( - examples=examples, - inputs=[ - language_radio, - model_dropdown, - decoding_method_radio, - num_active_paths_slider, - uploaded_file, - ], - outputs=[uploaded_output, uploaded_html_info], - fn=process_uploaded_file, - ) - - with gr.TabItem("Record from microphone"): - microphone = gr.Audio( - source="microphone", # Choose between "microphone", "upload" - type="filepath", - optional=False, - label="Record from microphone", - ) - - record_button = gr.Button("Submit for recognition") - recorded_output = gr.Textbox(label="Recognized speech from recordings") - recorded_html_info = gr.HTML(label="Info") - - gr.Examples( - examples=examples, - inputs=[ - language_radio, - model_dropdown, - decoding_method_radio, - num_active_paths_slider, - microphone, - ], - outputs=[recorded_output, recorded_html_info], - fn=process_microphone, - ) - - with gr.TabItem("From URL"): - url_textbox = gr.Textbox( - max_lines=1, - placeholder="URL to an audio file", - label="URL", - interactive=True, - ) - - url_button = gr.Button("Submit for recognition") - url_output = gr.Textbox(label="Recognized speech from URL") - url_html_info = 
gr.HTML(label="Info") - - upload_button.click( - process_uploaded_file, - inputs=[ - language_radio, - model_dropdown, - decoding_method_radio, - num_active_paths_slider, - uploaded_file, - ], - outputs=[uploaded_output, uploaded_html_info], - ) - - record_button.click( - process_microphone, - inputs=[ - language_radio, - model_dropdown, - decoding_method_radio, - num_active_paths_slider, - microphone, - ], - outputs=[recorded_output, recorded_html_info], - ) - - url_button.click( - process_url, - inputs=[ - language_radio, - model_dropdown, - decoding_method_radio, - num_active_paths_slider, - url_textbox, - ], - outputs=[url_output, url_html_info], - ) - - gr.Markdown(description) - -torch.set_num_threads(1) -torch.set_num_interop_threads(1) - -torch._C._jit_set_profiling_executor(False) -torch._C._jit_set_profiling_mode(False) -torch._C._set_graph_executor_optimize(False) - -if __name__ == "__main__": - formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" - - logging.basicConfig(format=formatter, level=logging.INFO) - - demo.launch() diff --git a/spaces/emc348/faces-through-time/models/StyleCLIP/models/facial_recognition/__init__.py b/spaces/emc348/faces-through-time/models/StyleCLIP/models/facial_recognition/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/epsilonator/euclidean_distance/README.md b/spaces/epsilonator/euclidean_distance/README.md deleted file mode 100644 index 7766cb06314cd2b18b9d290a0b39d5a7828e9018..0000000000000000000000000000000000000000 --- a/spaces/epsilonator/euclidean_distance/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Euclidean Distance -emoji: 🏃 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.7 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/evaluate-metric/exact_match/exact_match.py b/spaces/evaluate-metric/exact_match/exact_match.py deleted file mode 100644 index d8c499b3722b0bdbbf3d8a7e3d48899513f27d19..0000000000000000000000000000000000000000 --- a/spaces/evaluate-metric/exact_match/exact_match.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Exact Match metric.""" -import re -import string - -import datasets -import numpy as np - -import evaluate - - -_DESCRIPTION = """ -Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. -""" - -_KWARGS_DESCRIPTION = """ -Args: - predictions: List of predicted texts. - references: List of reference texts. - regexes_to_ignore: List, defaults to None. Regex expressions of characters to - ignore when calculating the exact matches. Note: these regexes are removed - from the input data before the changes based on the options below (e.g. 
ignore_case, - ignore_punctuation, ignore_numbers) are applied. - ignore_case: Boolean, defaults to False. If true, turns everything - to lowercase so that capitalization differences are ignored. - ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before - comparing predictions and references. - ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before - comparing predictions and references. -Returns: - exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 1.0, inclusive. -Examples: - >>> exact_match = evaluate.load("exact_match") - >>> refs = ["the cat", "theater", "YELLING", "agent007"] - >>> preds = ["cat?", "theater", "yelling", "agent"] - >>> results = exact_match.compute(references=refs, predictions=preds) - >>> print(round(results["exact_match"], 2)) - 0.25 - - >>> exact_match = evaluate.load("exact_match") - >>> refs = ["the cat", "theater", "YELLING", "agent007"] - >>> preds = ["cat?", "theater", "yelling", "agent"] - >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) - >>> print(round(results["exact_match"], 2)) - 0.5 - - - >>> exact_match = evaluate.load("exact_match") - >>> refs = ["the cat", "theater", "YELLING", "agent007"] - >>> preds = ["cat?", "theater", "yelling", "agent"] - >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) - >>> print(round(results["exact_match"], 2)) - 0.75 - - >>> exact_match = evaluate.load("exact_match") - >>> refs = ["the cat", "theater", "YELLING", "agent007"] - >>> preds = ["cat?", "theater", "yelling", "agent"] - >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) - >>> print(round(results["exact_match"], 2)) - 1.0 - - >>> exact_match = evaluate.load("exact_match") - >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."] - >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."] - >>> results = exact_match.compute(references=refs, predictions=preds) - >>> print(round(results["exact_match"], 2)) - 0.33 -""" - -_CITATION = """ -""" - - -@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) -class ExactMatch(evaluate.Metric): - def _info(self): - return evaluate.MetricInfo( - description=_DESCRIPTION, - citation=_CITATION, - inputs_description=_KWARGS_DESCRIPTION, - features=datasets.Features( - { - "predictions": datasets.Value("string", id="sequence"), - "references": datasets.Value("string", id="sequence"), - } - ), - reference_urls=[], - ) - - def _compute( - self, - predictions, - references, - regexes_to_ignore=None, - ignore_case=False, - ignore_punctuation=False, - ignore_numbers=False, - ): - - if regexes_to_ignore is not None: - for s in regexes_to_ignore: - predictions = np.array([re.sub(s, "", x) for x in predictions]) - references = np.array([re.sub(s, "", x) for x in references]) - else: - predictions = np.asarray(predictions) - references = np.asarray(references) - - if ignore_case: - predictions = np.char.lower(predictions) - references = np.char.lower(references) - - if ignore_punctuation: - repl_table = string.punctuation.maketrans("", "", string.punctuation) - predictions = 
np.char.translate(predictions, table=repl_table) - references = np.char.translate(references, table=repl_table) - - if ignore_numbers: - repl_table = string.digits.maketrans("", "", string.digits) - predictions = np.char.translate(predictions, table=repl_table) - references = np.char.translate(references, table=repl_table) - - score_list = predictions == references - - return {"exact_match": np.mean(score_list)} diff --git a/spaces/failfast/2D-GameCreator/src/lib/theme.ts b/spaces/failfast/2D-GameCreator/src/lib/theme.ts deleted file mode 100644 index e31ce5292b1103b7ef462c7b1a7efabc255508fa..0000000000000000000000000000000000000000 --- a/spaces/failfast/2D-GameCreator/src/lib/theme.ts +++ /dev/null @@ -1,79 +0,0 @@ -import { Fira_Code, Poppins } from "next/font/google"; -import { experimental_extendTheme as extendTheme, Theme } from "@mui/material/styles"; - -export const poppins = Poppins({ - weight: ["300", "400", "500", "700"], - subsets: ["latin"], - display: "swap", - fallback: ["Helvetica", "Arial", "sans-serif"], -}); - -const theme = extendTheme({ - colorSchemes: { - light: { - palette: { - primary: { - main: "#2c90fc", - }, - secondary: { - main: "#b827fc", - }, - }, - }, - dark: { - palette: { - primary: { - main: "#2c90fc", - }, - secondary: { - main: "#b827fc", - }, - text: { - secondary: "#ffffff", - }, - }, - }, - }, - typography: { - ...poppins.style, - h1: { - fontSize: "5em", - }, - }, - components: { - MuiLink: { - styleOverrides: { - root: { - textDecoration: "none", - ":hover": { - textDecoration: "underline", - }, - }, - }, - }, - MuiListSubheader: { - styleOverrides: { - root: { - fontSize: "1.35rem", - }, - }, - }, - MuiButton: { - styleOverrides: { - startIcon: ({ ownerState }) => ({ - ...(ownerState.children - ? {} - : { - // if no button label, center icon (e.g mobile) - marginRight: 0, - }), - }), - }, - }, - }, -}); - -export default theme; -export const fontMono = Fira_Code({ - subsets: ["latin"], -}); diff --git a/spaces/falterWliame/Face_Mask_Detection/Crack WORK >> Harry Potter And The Deathly Hallows - Razor 1911 Hack Tool.md b/spaces/falterWliame/Face_Mask_Detection/Crack WORK >> Harry Potter And The Deathly Hallows - Razor 1911 Hack Tool.md deleted file mode 100644 index caab60704f79e9da02074b910070d03d47622583..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Crack WORK >> Harry Potter And The Deathly Hallows - Razor 1911 Hack Tool.md +++ /dev/null @@ -1,6 +0,0 @@ -

      CRACK >> Harry Potter And The Deathly Hallows - Razor 1911 hack tool


      DOWNLOAD ⇒⇒⇒ https://urlca.com/2uDcpE



      -
      -This activation code allows you to legally activate, download and play the 'Harry Potter And the Deathly Hallows, Part 1' game. Key Features. Action Hero: The ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/fb700/chat3/crazy_functions/test_project/cpp/cppipc/queue.h b/spaces/fb700/chat3/crazy_functions/test_project/cpp/cppipc/queue.h deleted file mode 100644 index a21f3446e06b5826af7b554c8a7d9c5d80848b62..0000000000000000000000000000000000000000 --- a/spaces/fb700/chat3/crazy_functions/test_project/cpp/cppipc/queue.h +++ /dev/null @@ -1,216 +0,0 @@ -#pragma once - -#include -#include -#include // [[since C++14]]: std::exchange -#include -#include -#include -#include -#include -#include -#include // assert - -#include "libipc/def.h" -#include "libipc/shm.h" -#include "libipc/rw_lock.h" - -#include "libipc/utility/log.h" -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" - -namespace ipc { -namespace detail { - -class queue_conn { -protected: - circ::cc_t connected_ = 0; - shm::handle elems_h_; - - template - Elems* open(char const * name) { - if (name == nullptr || name[0] == '\0') { - ipc::error("fail open waiter: name is empty!\n"); - return nullptr; - } - if (!elems_h_.acquire(name, sizeof(Elems))) { - return nullptr; - } - auto elems = static_cast(elems_h_.get()); - if (elems == nullptr) { - ipc::error("fail acquire elems: %s\n", name); - return nullptr; - } - elems->init(); - return elems; - } - - void close() { - elems_h_.release(); - } - -public: - queue_conn() = default; - queue_conn(const queue_conn&) = delete; - queue_conn& operator=(const queue_conn&) = delete; - - bool connected() const noexcept { - return connected_ != 0; - } - - circ::cc_t connected_id() const noexcept { - return connected_; - } - - template - auto connect(Elems* elems) noexcept - /*needs 'optional' here*/ - -> std::tuple().cursor())> { - if (elems == nullptr) return {}; - // if it's already connected, just return - if (connected()) return {connected(), false, 0}; - connected_ = elems->connect_receiver(); - return {connected(), true, elems->cursor()}; - } - - template - bool disconnect(Elems* elems) noexcept { - if (elems == nullptr) return false; - // if it's already disconnected, just return false - if (!connected()) return false; - elems->disconnect_receiver(std::exchange(connected_, 0)); - return true; - } -}; - -template -class queue_base : public queue_conn { - using base_t = queue_conn; - -public: - using elems_t = Elems; - using policy_t = typename elems_t::policy_t; - -protected: - elems_t * elems_ = nullptr; - decltype(std::declval().cursor()) cursor_ = 0; - bool sender_flag_ = false; - -public: - using base_t::base_t; - - queue_base() = default; - - explicit queue_base(char const * name) - : queue_base{} { - elems_ = open(name); - } - - explicit queue_base(elems_t * elems) noexcept - : queue_base{} { - assert(elems != nullptr); - elems_ = elems; - } - - /* not virtual */ ~queue_base() { - base_t::close(); - } - - elems_t * elems() noexcept { return elems_; } - elems_t const * elems() const noexcept { return elems_; } - - bool ready_sending() noexcept { - if (elems_ == nullptr) return false; - return sender_flag_ || (sender_flag_ = elems_->connect_sender()); - } - - void shut_sending() noexcept { - if (elems_ == nullptr) return; - if (!sender_flag_) return; - elems_->disconnect_sender(); - } - - bool connect() noexcept { - auto tp = base_t::connect(elems_); - if (std::get<0>(tp) && std::get<1>(tp)) { - cursor_ = std::get<2>(tp); - return true; - } - return std::get<0>(tp); - } - - bool disconnect() noexcept { - return base_t::disconnect(elems_); - } - - std::size_t conn_count() const noexcept { - return (elems_ == nullptr) ? 
static_cast<std::size_t>(invalid_value) : elems_->conn_count(); - } - - bool valid() const noexcept { - return elems_ != nullptr; - } - - bool empty() const noexcept { - return !valid() || (cursor_ == elems_->cursor()); - } - - template <typename T, typename F, typename... P> - bool push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward

(params)...); - }); - } - - template <typename T, typename F, typename... P> - bool force_push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->force_push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward

      (params)...); - }); - } - - template - bool pop(T& item, F&& out) { - if (elems_ == nullptr) { - return false; - } - return elems_->pop(this, &(this->cursor_), [&item](void* p) { - ::new (&item) T(std::move(*static_cast(p))); - }, std::forward(out)); - } -}; - -} // namespace detail - -template -class queue final : public detail::queue_base> { - using base_t = detail::queue_base>; - -public: - using value_t = T; - - using base_t::base_t; - - template - bool push(P&&... params) { - return base_t::template push(std::forward

      (params)...); - } - - template - bool force_push(P&&... params) { - return base_t::template force_push(std::forward

      (params)...); - } - - bool pop(T& item) { - return base_t::pop(item, [](bool) {}); - } - - template - bool pop(T& item, F&& out) { - return base_t::pop(item, std::forward(out)); - } -}; - -} // namespace ipc diff --git a/spaces/fb700/chat3/request_llm/bridge_chatgpt.py b/spaces/fb700/chat3/request_llm/bridge_chatgpt.py deleted file mode 100644 index 9c4c3d2742e4f22773148d6e3dbc1f9c8b618122..0000000000000000000000000000000000000000 --- a/spaces/fb700/chat3/request_llm/bridge_chatgpt.py +++ /dev/null @@ -1,266 +0,0 @@ -# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目 - -""" - 该文件中主要包含三个函数 - - 不具备多线程能力的函数: - 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 - - 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 -""" - -import json -import time -import gradio as gr -import logging -import traceback -import requests -import importlib - -# config_private.py放自己的秘密如API和代理网址 -# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 -from toolbox import get_conf, update_ui, is_any_api_key, select_api_key -proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \ - get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY') - -timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \ - '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。' - -def get_full_error(chunk, stream_response): - """ - 获取完整的从Openai返回的报错 - """ - while True: - try: - chunk += next(stream_response) - except: - break - return chunk - - -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): - """ - 发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 - inputs: - 是本次问询的输入 - sys_prompt: - 系统静默prompt - llm_kwargs: - chatGPT的内部调优参数 - history: - 是之前的对话列表 - observe_window = None: - 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 - """ - watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可 - headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True) - retry = 0 - while True: - try: - # make a POST request to the API endpoint, stream=False - from .bridge_all import model_info - endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - response = requests.post(endpoint, headers=headers, proxies=proxies, - json=payload, stream=True, timeout=TIMEOUT_SECONDS); break - except requests.exceptions.ReadTimeout as e: - retry += 1 - traceback.print_exc() - if retry > MAX_RETRY: raise TimeoutError - if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') - - stream_response = response.iter_lines() - result = '' - while True: - try: chunk = next(stream_response).decode() - except StopIteration: - break - except requests.exceptions.ConnectionError: - chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。 - if len(chunk)==0: continue - if not chunk.startswith('data:'): - error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode() - if "reduce the length" in error_msg: - raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg) - else: - raise RuntimeError("OpenAI拒绝了请求:" + error_msg) - json_data = json.loads(chunk.lstrip('data:'))['choices'][0] - delta = json_data["delta"] - if len(delta) == 0: break - if "role" in delta: continue - if "content" in delta: - result += delta["content"] - if not console_slience: print(delta["content"], end='') - if observe_window is not None: - # 观测窗,把已经获取的数据显示出去 - 
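# observe_window (described in the docstring above) is a plain two-element list
# shared with the caller's thread: observe_window[0] accumulates the streamed
# text so far, so another thread can render partial output, while
# observe_window[1] is a watchdog timestamp that the caller must keep
# refreshing; if it goes stale for longer than watch_dog_patience seconds, the
# loop below raises to abort the request.
#
# A minimal, hypothetical caller-side sketch of this interface:
#
#     window = ["", time.time()]
#     answer = predict_no_ui_long_connection(
#         inputs, llm_kwargs, history, sys_prompt, observe_window=window)
#     # a UI thread may read window[0] for partial output and must periodically
#     # set window[1] = time.time() to keep the request alive.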
if len(observe_window) >= 1: observe_window[0] += delta["content"] - # 看门狗,如果超过期限没有喂狗,则终止 - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("用户取消了程序。") - else: raise RuntimeError("意外Json结构:"+delta) - if json_data['finish_reason'] == 'length': - raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。") - return result - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 发送至chatGPT,流式获取输出。 - 用于基础的对话功能。 - inputs 是本次问询的输入 - top_p, temperature是chatGPT的内部调优参数 - history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误) - chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 - additional_fn代表点击的哪个按钮,按钮见functional.py - """ - if is_any_api_key(inputs): - chatbot._cookies['api_key'] = inputs - chatbot.append(("输入已识别为openai的api_key", "api_key已导入")) - yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面 - return - elif not is_any_api_key(chatbot._cookies['api_key']): - chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")) - yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面 - return - - if additional_fn is not None: - import core_functional - importlib.reload(core_functional) # 热更新prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - raw_input = inputs - logging.info(f'[raw_input] {raw_input}') - chatbot.append((inputs, "")) - yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 - - try: - headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream) - except RuntimeError as e: - chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。") - yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面 - return - - history.append(inputs); history.append(" ") - - retry = 0 - while True: - try: - # make a POST request to the API endpoint, stream=True - from .bridge_all import model_info - endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - response = requests.post(endpoint, headers=headers, proxies=proxies, - json=payload, stream=True, timeout=TIMEOUT_SECONDS);break - except: - retry += 1 - chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg)) - retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else "" - yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面 - if retry > MAX_RETRY: raise TimeoutError - - gpt_replying_buffer = "" - - is_head_of_the_stream = True - if stream: - stream_response = response.iter_lines() - while True: - chunk = next(stream_response) - # print(chunk.decode()[6:]) - if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()): - # 数据流的第一帧不携带content - is_head_of_the_stream = False; continue - - if chunk: - try: - if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0: - # 判定为数据流的结束,gpt_replying_buffer也写完了 - logging.info(f'[response] {gpt_replying_buffer}') - break - # 处理数据流的主体 - chunkjson = json.loads(chunk.decode()[6:]) - status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}" - # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 - gpt_replying_buffer = gpt_replying_buffer + 
json.loads(chunk.decode()[6:])['choices'][0]["delta"]["content"] - history[-1] = gpt_replying_buffer - chatbot[-1] = (history[-2], history[-1]) - yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面 - - except Exception as e: - traceback.print_exc() - yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面 - chunk = get_full_error(chunk, stream_response) - error_msg = chunk.decode() - if "reduce the length" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长,或历史数据过长. 历史缓存数据现已释放,您可以请再次尝试.") - history = [] # 清除历史 - elif "Incorrect API key" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由,拒绝服务.") - elif "exceeded your current quota" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由,拒绝服务.") - elif "bad forward key" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.") - else: - from toolbox import regular_txt_to_markdown - tb_str = '```\n' + traceback.format_exc() + '```' - chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode()[4:])}") - yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面 - return - -def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): - """ - 整合所有信息,选择LLM模型,生成http请求,为发送请求做准备 - """ - if not is_any_api_key(llm_kwargs['api_key']): - raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。") - - api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {api_key}" - } - - conversation_cnt = len(history) // 2 - - messages = [{"role": "system", "content": system_prompt}] - if conversation_cnt: - for index in range(0, 2*conversation_cnt, 2): - what_i_have_asked = {} - what_i_have_asked["role"] = "user" - what_i_have_asked["content"] = history[index] - what_gpt_answer = {} - what_gpt_answer["role"] = "assistant" - what_gpt_answer["content"] = history[index+1] - if what_i_have_asked["content"] != "": - if what_gpt_answer["content"] == "": continue - if what_gpt_answer["content"] == timeout_bot_msg: continue - messages.append(what_i_have_asked) - messages.append(what_gpt_answer) - else: - messages[-1]['content'] = what_gpt_answer['content'] - - what_i_ask_now = {} - what_i_ask_now["role"] = "user" - what_i_ask_now["content"] = inputs - messages.append(what_i_ask_now) - - payload = { - "model": llm_kwargs['llm_model'].strip('api2d-'), - "messages": messages, - "temperature": llm_kwargs['temperature'], # 1.0, - "top_p": llm_kwargs['top_p'], # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - try: - print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........") - except: - print('输入中可能存在乱码。') - return headers,payload - - diff --git a/spaces/fclong/summary/fengshen/examples/clue1.1/README.md b/spaces/fclong/summary/fengshen/examples/clue1.1/README.md deleted file mode 100644 index 63856c5a596db8f968a7dcebcc03d85ff8c3a49f..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/clue1.1/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# 中文 NLP 权威测评基准 CLUE 刷榜 Top10 方案指南 - - [CLUE](https://www.cluebenchmarks.com) 是中文 NLP 的权威测评榜单,也吸引了许多国内许多团队在上面进行测评。在我们的最新模型 UniMC 中,也使用 CLUE 对我们的模型进行了测评。在全量数据榜单 CLUE1.1 中,我们的 
[UniMC-DeBERTa-1.4B](https://huggingface.co/IDEA-CCNL/Erlangshen-UniMC-DeBERTa-v2-1.4B-Chinese) 模型取得了第 8 的成绩,是 [CLUE1.1](https://www.cluebenchmarks.com/rank.html) 排行榜(2022年11月14日)前 10 名中唯一开源模型权重和刷榜代码的模型。 - -## 刷榜方案 - -通过观察可以发现,在CLUE需要测评的 9 个任务中,有 8 个是分类任务,只有一个 cmrc2018 是抽取式的阅读理解任务。因此,结合我们的 Fengshenbang-LM 已有的模型,我们可以使用 [UniMC](https://github.com/IDEA-CCNL/Fengshenbang-LM/tree/dev/yangping/fengshen/examples/unimc) 来实现 8 个是分类任务,用 [Ubert](https://github.com/IDEA-CCNL/Fengshenbang-LM/tree/dev/yangping/fengshen/examples/ubert) 来实现 cmrc2018 任务,详细的方案可以看我们的知乎文章:https://zhuanlan.zhihu.com/p/583679722 - -## 项目要求 - -安装我们的 fengshen 框架,我们暂且提供如下方式安装 -```shell -git clone https://github.com/IDEA-CCNL/Fengshenbang-LM.git -cd Fengshenbang-LM -pip install --editable ./ -``` -## 运行项目 - -### 数据下载 -由于 HuggingFace 上的数据与最终提交的数据 id 有可能对应不上,所以建议还是去官方仓库进行下载 -https://github.com/CLUEBENCHMARK/CLUE - - -### 数据预处理 -将数据下载之后,修改下面脚本的路径,运行下面脚本将数据处理成 UniMC 模型 和 Ubert 模型所需要的格式 -```shell -sh cluedata2unidata.sh -``` - -### 模型训练 -训练CLUE上的8个分类任务,一些训练参数可根据自己的设备进行修改。对于全量数据来说,训练超参数没有那么大的影响 -```shell -sh run_clue_unimc.sh -``` -训练 cmrc2018 任务,一些训练参数可根据自己的设备进行修改 -```shell -sh run_clue_ubert.sh -``` - -### 预测结果提交 - -运行下面脚本将预测结果转化为 CLUE 要求的格式,数据路径需要根据自己的路径修改调整。运行下面脚本就可以得到结果,然后拿到 [CLUE](https://www.cluebenchmarks.com/index.html) 官网上去提交了 - -```shell -sh predict2submit.sh -``` - - diff --git a/spaces/fclong/summary/fengshen/examples/zen1_finetune/fengshen_token_level_ft_task.py b/spaces/fclong/summary/fengshen/examples/zen1_finetune/fengshen_token_level_ft_task.py deleted file mode 100644 index 8cb77bbe0edf675300614982466e802964f8c625..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/zen1_finetune/fengshen_token_level_ft_task.py +++ /dev/null @@ -1,647 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The IDEA Authors. All rights reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from fengshen.models.zen1.ngram_utils import ZenNgramDict -from fengshen.models.zen1.modeling import ZenForTokenClassification -from fengshen.metric.metric import SeqEntityScore -from fengshen.models.zen1.tokenization import BertTokenizer -from random import shuffle -from pytorch_lightning.callbacks import LearningRateMonitor -from dataclasses import dataclass -import logging -import math -import numpy as np -import os -import json -import torch -import pytorch_lightning as pl -import argparse -from pytorch_lightning.callbacks import ModelCheckpoint -from torch.utils.data import Dataset, DataLoader - -import torch.nn.functional as F -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', - datefmt='%m/%d/%Y %H:%M:%S', - level=logging.ERROR) -logger = logging.getLogger(__name__) - - -class InputExample(object): - """A single training/test example for simple sequence classification.""" - - def __init__(self, guid, text_a, text_b=None, label=None): - """Constructs a InputExample. - - Args: - guid: Unique id for the example. - text_a: string. The untokenized text of the first sequence. 
For single - sequence tasks, only this sequence must be specified. - text_b: (Optional) string. The untokenized text of the second sequence. - Only must be specified for sequence pair tasks. - label: (Optional) string. The label of the example. This should be - specified for train and dev examples, but not for test examples. - """ - self.guid = guid - self.text_a = text_a - self.text_b = text_b - self.label = label - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, input_ids, input_mask, segment_ids, label_id, ngram_ids, ngram_positions, ngram_lengths, - ngram_tuples, ngram_seg_ids, ngram_masks, valid_ids=None, label_mask=None): - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.label_id = label_id - self.valid_ids = valid_ids - self.label_mask = label_mask - - self.ngram_ids = ngram_ids - self.ngram_positions = ngram_positions - self.ngram_lengths = ngram_lengths - self.ngram_tuples = ngram_tuples - self.ngram_seg_ids = ngram_seg_ids - self.ngram_masks = ngram_masks - - -def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict): - """Loads a data file into a list of `InputBatch`s.""" - - # label_map = {label: i for i, label in enumerate(label_list, 1)} - - features = [] - for (ex_index, example) in enumerate(examples): - textlist = example.text_a - labellist = example.label - tokens = [] - labels = [] - valid = [] - label_mask = [] - for i, word in enumerate(textlist): - token = tokenizer.tokenize(word) - tokens.extend(token) - label_1 = labellist[i] - for m in range(len(token)): - if m == 0: - labels.append(label_1) - valid.append(1) - label_mask.append(1) - else: - valid.append(0) - if len(tokens) >= max_seq_length - 1: - tokens = tokens[0:(max_seq_length - 2)] - labels = labels[0:(max_seq_length - 2)] - valid = valid[0:(max_seq_length - 2)] - label_mask = label_mask[0:(max_seq_length - 2)] - ntokens = [] - segment_ids = [] - label_ids = [] - ntokens.append("[CLS]") - segment_ids.append(0) - valid.insert(0, 1) - label_mask.insert(0, 1) - label_ids.append(label_map["[CLS]"]) - for i, token in enumerate(tokens): - ntokens.append(token) - segment_ids.append(0) - if len(labels) > i: - label_ids.append(label_map[labels[i]]) - ntokens.append("[SEP]") - segment_ids.append(0) - valid.append(1) - label_mask.append(1) - label_ids.append(label_map["[SEP]"]) - input_ids = tokenizer.convert_tokens_to_ids(ntokens) - input_mask = [1] * len(input_ids) - label_mask = [1] * len(label_ids) - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - label_ids.append(0) - valid.append(1) - label_mask.append(0) - while len(label_ids) < max_seq_length: - label_ids.append(0) - label_mask.append(0) - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - assert len(label_ids) == max_seq_length - assert len(valid) == max_seq_length - assert len(label_mask) == max_seq_length - - # ----------- code for ngram BEGIN----------- - ngram_matches = [] - # Filter the ngram segment from 2 to 7 to check whether there is a ngram - for p in range(2, 8): - for q in range(0, len(tokens) - p + 1): - character_segment = tokens[q:q + p] - # j is the starting position of the ngram - # i is the length of the current ngram - character_segment = tuple(character_segment) - if character_segment in ngram_dict.ngram_to_id_dict: - ngram_index = 
ngram_dict.ngram_to_id_dict[character_segment] - ngram_matches.append([ngram_index, q, p, character_segment]) - - shuffle(ngram_matches) - - max_ngram_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq) - if len(ngram_matches) > max_ngram_in_seq_proportion: - ngram_matches = ngram_matches[:max_ngram_in_seq_proportion] - - ngram_ids = [ngram[0] for ngram in ngram_matches] - ngram_positions = [ngram[1] for ngram in ngram_matches] - ngram_lengths = [ngram[2] for ngram in ngram_matches] - ngram_tuples = [ngram[3] for ngram in ngram_matches] - ngram_seg_ids = [0 if position < (len(tokens) + 2) else 1 for position in ngram_positions] - - ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=np.bool) - ngram_mask_array[:len(ngram_ids)] = 1 - - # record the masked positions - ngram_positions_matrix = np.zeros(shape=(max_seq_length, ngram_dict.max_ngram_in_seq), dtype=np.int32) - for i in range(len(ngram_ids)): - ngram_positions_matrix[ngram_positions[i]:ngram_positions[i] + ngram_lengths[i], i] = 1.0 - - # Zero-pad up to the max ngram in seq length. - padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids)) - ngram_ids += padding - ngram_lengths += padding - ngram_seg_ids += padding - - # ----------- code for ngram END----------- - - features.append( - InputFeatures(input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - label_id=label_ids, - ngram_ids=ngram_ids, - ngram_positions=ngram_positions_matrix, - ngram_lengths=ngram_lengths, - ngram_tuples=ngram_tuples, - ngram_seg_ids=ngram_seg_ids, - ngram_masks=ngram_mask_array, - valid_ids=valid, - label_mask=label_mask)) - return features - - -class DataProcessor(object): - """Base class for data converters for sequence classification data sets.""" - - def get_examples(self, data_path, set_type, quotechar=' '): - """See base class.""" - return self._create_examples( - self._read_tsv(data_path, self.get_quotechar()), set_type) - - def _create_examples(self, lines, set_type): - examples = [] - for i, (sentence, label) in enumerate(lines): - guid = "%s-%s" % (set_type, i) - text_a = sentence - label = label - examples.append(InputExample(guid=guid, text_a=text_a, label=label)) - return examples - - def get_labels(self): - """Gets the list of labels for this data set.""" - raise NotImplementedError() - - def get_quotechar(self): - return ' ' - - @classmethod - def _read_tsv(cls, input_file, quotechar=None): - ''' - read file - return format : - [ ['EU', 'B-ORG'], ['rejects', 'O'], ['German', 'B-MISC'], ['call', 'O'], ['to', 'O'], ['boycott', 'O'], ['British', 'B-MISC'], ['lamb', 'O'], ['.', 'O'] ] - ''' - f = open(input_file) - data = [] - sentence = [] - label = [] - for line in f: - if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n": - if len(sentence) > 0: - data.append((sentence, label)) - sentence = [] - label = [] - continue - splits = line.split(quotechar) - sentence.append(splits[0]) - label.append(splits[-1][:-1]) - - if len(sentence) > 0: - data.append((sentence, label)) - sentence = [] - label = [] - return data - - -class MSRAProcessor(DataProcessor): - """Processor for the msra data set.""" - - def get_labels(self): - return ['B-NR', 'B-NS', 'B-NT', 'E-NR', 'E-NS', 'E-NT', 'M-NR', - 'M-NS', 'M-NT', 'O', 'S-NR', 'S-NS', 'S-NT', '[CLS]', '[SEP]'] - - -class OntoNotes4Processor(DataProcessor): - """Processor for the OntoNotes4 data set.""" - - def get_labels(self): - return ['B-GPE', 'B-LOC', 'B-ORG', 'B-PER', 'E-GPE', 'E-LOC', - 'E-ORG', 'E-PER', 
'M-GPE', 'M-LOC', 'M-ORG', 'M-PER', 'O', - 'S-GPE', 'S-LOC', 'S-ORG', 'S-PER', '[CLS]', '[SEP]'] - - -class WeiboProcessor(DataProcessor): - """Processor for the Weibo data set.""" - - def get_labels(self): - return ['B-GPE.NAM', 'B-GPE.NOM', 'B-LOC.NAM', 'B-LOC.NOM', - 'B-ORG.NAM', 'B-ORG.NOM', 'B-PER.NAM', 'B-PER.NOM', 'E-GPE.NAM', - 'E-GPE.NOM', 'E-LOC.NAM', 'E-LOC.NOM', 'E-ORG.NAM', 'E-ORG.NOM', - 'E-PER.NAM', 'E-PER.NOM', 'M-GPE.NAM', 'M-LOC.NAM', 'M-LOC.NOM', - 'M-ORG.NAM', 'M-ORG.NOM', 'M-PER.NAM', 'M-PER.NOM', 'O', - 'S-GPE.NAM', 'S-LOC.NOM', 'S-PER.NAM', 'S-PER.NOM', '[CLS]', '[SEP]'] - - -class ResumeProcessor(DataProcessor): - """Processor for the resume data set.""" - - def get_labels(self): - return ['B-CONT', 'B-EDU', 'B-LOC', 'B-NAME', 'B-ORG', 'B-PRO', - 'B-RACE', 'B-TITLE', 'E-CONT', 'E-EDU', 'E-LOC', 'E-NAME', - 'E-ORG', 'E-PRO', 'E-RACE', 'E-TITLE', 'M-CONT', 'M-EDU', - 'M-LOC', 'M-NAME', 'M-ORG', 'M-PRO', 'M-RACE', 'M-TITLE', - 'O', 'S-NAME', 'S-ORG', 'S-RACE', '[CLS]', '[SEP]'] - - -class CMeEEProcessor(DataProcessor): - """Processor for the CMeEE data set.""" - - def get_quotechar(self): - return '\t' - - def get_labels(self): - return ['B-临床表现', 'B-医学检验项目', 'B-医疗程序', 'B-医疗设备', - 'B-微生物类', 'B-疾病', 'B-科室', 'B-药物', 'B-身体', 'I-临床表现', - 'I-医学检验项目', 'I-医疗程序', 'I-医疗设备', 'I-微生物类', - 'I-疾病', 'I-科室', 'I-药物', 'I-身体', 'O', '[CLS]', '[SEP]'] - - -class CLUENERProcessor(DataProcessor): - """Processor for the CLUENER data set.""" - - def get_quotechar(self): - return '\t' - - def get_labels(self): - return ['B-书名', 'B-公司', 'B-地址', 'B-姓名', 'B-政府', 'B-景点', - 'B-游戏', 'B-电影', 'B-组织机构', 'B-职位', 'I-书名', 'I-公司', - 'I-地址', 'I-姓名', 'I-政府', 'I-景点', 'I-游戏', 'I-电影', - 'I-组织机构', 'I-职位', 'O', '[CLS]', '[SEP]'] - - -class TaskDataset(Dataset): - def __init__(self, data_path, processor, mode='train'): - super().__init__() - self.data = self.load_data(data_path, processor, mode) - - def __len__(self): - return len(self.data) - - def __getitem__(self, index): - return self.data[index] - - def load_data(self, data_path, processor, mode): - if mode == "train": - examples = processor.get_examples(data_path, mode) - elif mode == "test": - examples = processor.get_examples(data_path, mode) - elif mode == "dev": - examples = processor.get_examples(data_path, mode) - return examples - - -@dataclass -class TaskCollator: - args = None - tokenizer = None - ngram_dict = None - label2id = None - - def __call__(self, samples): - features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict) - # logger.info(" Num examples = %d", len(samples)) - - input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) - input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) - segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) - label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long) - valid_ids = torch.tensor([f.valid_ids for f in features], dtype=torch.long) - - ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long) - ngram_positions = torch.tensor([f.ngram_positions for f in features], dtype=torch.long) - # ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long) - # ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long) - # ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long) - - # label_mask = torch.tensor([f.label_mask for f in features], dtype=torch.long) - return { - 
'input_ids': input_ids, - 'ngram_ids': ngram_ids, - 'ngram_positions': ngram_positions, - 'attention_mask': input_mask, - 'token_type_ids': segment_ids, - 'labels': label_ids, - 'valid_ids': valid_ids, - } - - -class TaskDataModel(pl.LightningDataModule): - @staticmethod - def add_data_specific_args(parent_args): - parser = parent_args.add_argument_group('TASK NAME DataModel') - parser.add_argument('--data_dir', default='./data', type=str) - parser.add_argument('--num_workers', default=8, type=int) - parser.add_argument('--train_data', default='train.json', type=str) - parser.add_argument('--valid_data', default='dev.json', type=str) - parser.add_argument('--test_data', default='test.json', type=str) - parser.add_argument('--train_batchsize', default=16, type=int) - parser.add_argument('--valid_batchsize', default=32, type=int) - parser.add_argument('--max_seq_length', default=128, type=int) - - parser.add_argument('--texta_name', default='text', type=str) - parser.add_argument('--textb_name', default='sentence2', type=str) - parser.add_argument('--label_name', default='label', type=str) - parser.add_argument('--id_name', default='id', type=str) - - parser.add_argument('--dataset_name', default=None, type=str) - parser.add_argument('--vocab_file', - type=str, default=None, - help="Vocabulary mapping/file BERT was pretrainined on") - parser.add_argument("--do_lower_case", - action='store_true', - help="Set this flag if you are using an uncased model.") - parser.add_argument('--task_name', default='weibo', type=str) - - return parent_args - - def __init__(self, args): - super().__init__() - self.train_batchsize = args.train_batchsize - self.valid_batchsize = args.valid_batchsize - self.collator = TaskCollator() - self.collator.args = args - self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case) - self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer) - - processors = { - 'weibo': WeiboProcessor, - 'resume': ResumeProcessor, - 'msra': MSRAProcessor, - 'ontonotes4': OntoNotes4Processor, - 'cmeee': CMeEEProcessor, - 'cluener': CLUENERProcessor, - } - if args.task_name not in processors: - raise ValueError("Task not found: %s" % (args.task_name)) - processor = processors[args.task_name]() - # 生成id映射 - label_list = processor.get_labels() - label2id = {label: i for i, label in enumerate(label_list, 1)} - label2id["[PAD]"] = 0 - self.id2label = {v: k for k, v in label2id.items()} - self.collator.label2id = label2id - - if args.dataset_name is None: - self.train_data = TaskDataset(os.path.join( - args.data_dir, args.train_data), processor, mode='train') - self.valid_data = TaskDataset(os.path.join( - args.data_dir, args.valid_data), processor, mode='dev') - self.test_data = TaskDataset(os.path.join( - args.data_dir, args.test_data), processor, mode='test') - - else: - import datasets - ds = datasets.load_dataset(args.dataset_name) - self.train_data = ds['train'] - self.valid_data = ds['validation'] - self.test_data = ds['test'] - self.save_hyperparameters(args) - - def train_dataloader(self): - return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False, - collate_fn=self.collator) - - def val_dataloader(self): - return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False, - collate_fn=self.collator) - - def predict_dataloader(self): - return DataLoader(self.test_data, shuffle=False, 
batch_size=self.valid_batchsize, pin_memory=False, - collate_fn=self.collator) - - -class LitModel(pl.LightningModule): - - @staticmethod - def add_model_specific_args(parent_args): - parser = parent_args.add_argument_group('BaseModel') - parser.add_argument('--markup', default='bios', type=str) - parser.add_argument('--middle_prefix', default='I-', type=str) - return parent_args - - def __init__(self, args, id2label): - super().__init__() - # config = ZenConfig(os.path.join(args.pretrained_model_path, 'config.json')) - self.model = ZenForTokenClassification.from_pretrained(args.pretrained_model_path, num_labels=len(id2label)) - self.seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix) - self.train_seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix) - self.id2label = id2label - self.label2id = {v: k for k, v in id2label.items()} - self.save_hyperparameters(args) - - def setup(self, stage) -> None: - if stage == 'fit': - train_loader = self.trainer._data_connector._train_dataloader_source.dataloader() - - # Calculate total steps - if self.trainer.max_epochs > 0: - world_size = self.trainer.world_size - tb_size = self.hparams.train_batchsize * max(1, world_size) - ab_size = self.trainer.accumulate_grad_batches - self.total_steps = (len(train_loader.dataset) * - self.trainer.max_epochs // tb_size) // ab_size - else: - self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches - - print('Total steps: {}' .format(self.total_steps)) - - def training_step(self, batch, batch_idx): - outputs = self.model(**batch) - loss, _ = outputs - # logits = outputs.logits - # preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2) - # preds = preds.detach().cpu().numpy() - # labels = batch['labels'].detach().cpu().numpy() - # num_labels = len(self.label2id) - # y_true = [] - # y_pred = [] - # for i, label in enumerate(labels): - # temp_1 = [] - # temp_2 = [] - # for j, m in enumerate(label): - # if j == 0: - # continue - # elif labels[i][j] == num_labels - 1: - # y_true.append(temp_1) - # y_pred.append(temp_2) - # break - # else: - # temp_1.append(self.id2label[labels[i][j]]) - # temp_2.append(self.id2label[preds[i][j]]) - - # self.train_seq_entity_score.update(y_true, y_pred) - # result = self.train_seq_entity_score.result() - # self.train_seq_entity_score.reset() - self.log('train_loss', loss) - - return loss - - def validation_step(self, batch, batch_idx): - outputs = self.model(**batch) - loss, logits = outputs - preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2) - preds = preds.detach().cpu().numpy() - labels = batch['labels'].detach().cpu().numpy() - num_labels = len(self.label2id) - y_true = [] - y_pred = [] - for i, label in enumerate(labels): - temp_1 = [] - temp_2 = [] - for j, m in enumerate(label): - if j == 0: - continue - elif labels[i][j] == num_labels - 1: - y_true.append(temp_1) - y_pred.append(temp_2) - break - else: - temp_1.append(self.id2label[labels[i][j]]) - temp_2.append(self.id2label[preds[i][j]]) - - self.seq_entity_score.update(y_true, y_pred) - self.log('val_loss', loss) - - def validation_epoch_end(self, outputs): - # compute metric for all process - score_dict, _ = self.seq_entity_score.result() - if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0: - print('score_dict:\n', score_dict) - # reset the metric after once validation - self.seq_entity_score.reset() - for k, v in score_dict.items(): - self.log('val_{}'.format(k), v) - - def 
configure_optimizers(self): - from fengshen.models.model_utils import configure_optimizers - return configure_optimizers(self) - - -class TaskModelCheckpoint: - @staticmethod - def add_argparse_args(parent_args): - parser = parent_args.add_argument_group('BaseModel') - - parser.add_argument('--monitor', default='train_loss', type=str) - parser.add_argument('--mode', default='min', type=str) - parser.add_argument('--dirpath', default='./log/', type=str) - parser.add_argument( - '--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str) - - parser.add_argument('--save_top_k', default=3, type=float) - parser.add_argument('--every_n_train_steps', default=100, type=float) - parser.add_argument('--save_weights_only', default=True, type=bool) - - return parent_args - - def __init__(self, args): - self.callbacks = ModelCheckpoint(monitor=args.monitor, - save_top_k=args.save_top_k, - mode=args.mode, - every_n_train_steps=args.every_n_train_steps, - save_weights_only=args.save_weights_only, - dirpath=args.dirpath, - filename=args.filename) - - -def save_test(data, args, data_model): - with open(args.output_save_path, 'w', encoding='utf-8') as f: - idx = 0 - for i in range(len(data)): - batch = data[i] - for sample in batch: - tmp_result = dict() - label_id = np.argmax(sample.numpy()) - tmp_result['id'] = data_model.test_data.data[idx]['id'] - tmp_result['label'] = data_model.id2label[label_id] - json_data = json.dumps(tmp_result, ensure_ascii=False) - f.write(json_data+'\n') - idx += 1 - print('save the result to '+args.output_save_path) - - -def main(): - total_parser = argparse.ArgumentParser("TASK NAME") - total_parser.add_argument('--pretrained_model_path', default='', type=str) - total_parser.add_argument('--output_save_path', - default='./predict.json', type=str) - # * Args for data preprocessing - total_parser = TaskDataModel.add_data_specific_args(total_parser) - # * Args for training - total_parser = pl.Trainer.add_argparse_args(total_parser) - total_parser = TaskModelCheckpoint.add_argparse_args(total_parser) - - # * Args for base model - from fengshen.models.model_utils import add_module_args - total_parser = add_module_args(total_parser) - total_parser = LitModel.add_model_specific_args(total_parser) - - args = total_parser.parse_args() - - checkpoint_callback = TaskModelCheckpoint(args).callbacks - lr_monitor = LearningRateMonitor(logging_interval='step') - trainer = pl.Trainer.from_argparse_args(args, - callbacks=[checkpoint_callback, lr_monitor] - ) - - data_model = TaskDataModel(args) - id2label = data_model.id2label - print('id2label:', id2label) - model = LitModel(args, id2label) - trainer.fit(model, data_model) - - -if __name__ == "__main__": - main() diff --git a/spaces/fffiloni/Music_Source_Separation/bytesep/models/pytorch_modules.py b/spaces/fffiloni/Music_Source_Separation/bytesep/models/pytorch_modules.py deleted file mode 100644 index 0bc51f0945d2764b8428611a8ecf109a0b344884..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Music_Source_Separation/bytesep/models/pytorch_modules.py +++ /dev/null @@ -1,204 +0,0 @@ -from typing import List, NoReturn - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - - -def init_embedding(layer: nn.Module) -> NoReturn: - r"""Initialize a Linear or Convolutional layer.""" - nn.init.uniform_(layer.weight, -1.0, 1.0) - - if hasattr(layer, 'bias'): - if layer.bias is not None: - layer.bias.data.fill_(0.0) - - -def init_layer(layer: nn.Module) -> NoReturn: - r"""Initialize a Linear 
or Convolutional layer.""" - nn.init.xavier_uniform_(layer.weight) - - if hasattr(layer, "bias"): - if layer.bias is not None: - layer.bias.data.fill_(0.0) - - -def init_bn(bn: nn.Module) -> NoReturn: - r"""Initialize a Batchnorm layer.""" - bn.bias.data.fill_(0.0) - bn.weight.data.fill_(1.0) - bn.running_mean.data.fill_(0.0) - bn.running_var.data.fill_(1.0) - - -def act(x: torch.Tensor, activation: str) -> torch.Tensor: - - if activation == "relu": - return F.relu_(x) - - elif activation == "leaky_relu": - return F.leaky_relu_(x, negative_slope=0.01) - - elif activation == "swish": - return x * torch.sigmoid(x) - - else: - raise Exception("Incorrect activation!") - - -class Base: - def __init__(self): - r"""Base function for extracting spectrogram, cos, and sin, etc.""" - pass - - def spectrogram(self, input: torch.Tensor, eps: float = 0.0) -> torch.Tensor: - r"""Calculate spectrogram. - - Args: - input: (batch_size, segments_num) - eps: float - - Returns: - spectrogram: (batch_size, time_steps, freq_bins) - """ - (real, imag) = self.stft(input) - return torch.clamp(real ** 2 + imag ** 2, eps, np.inf) ** 0.5 - - def spectrogram_phase( - self, input: torch.Tensor, eps: float = 0.0 - ) -> List[torch.Tensor]: - r"""Calculate the magnitude, cos, and sin of the STFT of input. - - Args: - input: (batch_size, segments_num) - eps: float - - Returns: - mag: (batch_size, time_steps, freq_bins) - cos: (batch_size, time_steps, freq_bins) - sin: (batch_size, time_steps, freq_bins) - """ - (real, imag) = self.stft(input) - mag = torch.clamp(real ** 2 + imag ** 2, eps, np.inf) ** 0.5 - cos = real / mag - sin = imag / mag - return mag, cos, sin - - def wav_to_spectrogram_phase( - self, input: torch.Tensor, eps: float = 1e-10 - ) -> List[torch.Tensor]: - r"""Convert waveforms to magnitude, cos, and sin of STFT. - - Args: - input: (batch_size, channels_num, segment_samples) - eps: float - - Outputs: - mag: (batch_size, channels_num, time_steps, freq_bins) - cos: (batch_size, channels_num, time_steps, freq_bins) - sin: (batch_size, channels_num, time_steps, freq_bins) - """ - batch_size, channels_num, segment_samples = input.shape - - # Reshape input with shapes of (n, segments_num) to meet the - # requirements of the stft function. - x = input.reshape(batch_size * channels_num, segment_samples) - - mag, cos, sin = self.spectrogram_phase(x, eps=eps) - # mag, cos, sin: (batch_size * channels_num, 1, time_steps, freq_bins) - - _, _, time_steps, freq_bins = mag.shape - mag = mag.reshape(batch_size, channels_num, time_steps, freq_bins) - cos = cos.reshape(batch_size, channels_num, time_steps, freq_bins) - sin = sin.reshape(batch_size, channels_num, time_steps, freq_bins) - - return mag, cos, sin - - def wav_to_spectrogram( - self, input: torch.Tensor, eps: float = 1e-10 - ) -> List[torch.Tensor]: - - mag, cos, sin = self.wav_to_spectrogram_phase(input, eps) - return mag - - -class Subband: - def __init__(self, subbands_num: int): - r"""Warning!! This class is not used!! - - This class does not work as good as [1] which split subbands in the - time-domain. Please refere to [1] for formal implementation. - - [1] Liu, Haohe, et al. "Channel-wise subband input for better voice and - accompaniment separation on high resolution music." arXiv preprint arXiv:2008.05216 (2020). - - Args: - subbands_num: int, e.g., 4 - """ - self.subbands_num = subbands_num - - def analysis(self, x: torch.Tensor) -> torch.Tensor: - r"""Analysis time-frequency representation into subbands. Stack the - subbands along the channel axis. 
- - Args: - x: (batch_size, channels_num, time_steps, freq_bins) - - Returns: - output: (batch_size, channels_num * subbands_num, time_steps, freq_bins // subbands_num) - """ - batch_size, channels_num, time_steps, freq_bins = x.shape - - x = x.reshape( - batch_size, - channels_num, - time_steps, - self.subbands_num, - freq_bins // self.subbands_num, - ) - # x: (batch_size, channels_num, time_steps, subbands_num, freq_bins // subbands_num) - - x = x.transpose(2, 3) - - output = x.reshape( - batch_size, - channels_num * self.subbands_num, - time_steps, - freq_bins // self.subbands_num, - ) - # output: (batch_size, channels_num * subbands_num, time_steps, freq_bins // subbands_num) - - return output - - def synthesis(self, x: torch.Tensor) -> torch.Tensor: - r"""Synthesis subband time-frequency representations into original - time-frequency representation. - - Args: - x: (batch_size, channels_num * subbands_num, time_steps, freq_bins // subbands_num) - - Returns: - output: (batch_size, channels_num, time_steps, freq_bins) - """ - batch_size, subband_channels_num, time_steps, subband_freq_bins = x.shape - - channels_num = subband_channels_num // self.subbands_num - freq_bins = subband_freq_bins * self.subbands_num - - x = x.reshape( - batch_size, - channels_num, - self.subbands_num, - time_steps, - subband_freq_bins, - ) - # x: (batch_size, channels_num, subbands_num, time_steps, freq_bins // subbands_num) - - x = x.transpose(2, 3) - # x: (batch_size, channels_num, time_steps, subbands_num, freq_bins // subbands_num) - - output = x.reshape(batch_size, channels_num, time_steps, freq_bins) - # x: (batch_size, channels_num, time_steps, freq_bins) - - return output diff --git a/spaces/fffiloni/SplitTrack2MusicGen/audiocraft/modules/conditioners.py b/spaces/fffiloni/SplitTrack2MusicGen/audiocraft/modules/conditioners.py deleted file mode 100644 index 00e5deea62a17ae28fbc8fb72113f8011ec3072c..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/SplitTrack2MusicGen/audiocraft/modules/conditioners.py +++ /dev/null @@ -1,986 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from collections import defaultdict -from copy import deepcopy -from dataclasses import dataclass, field -from itertools import chain -import logging -import random -import re -import typing as tp -import warnings - -from einops import rearrange -from num2words import num2words -import spacy -from transformers import T5EncoderModel, T5Tokenizer # type: ignore -import torchaudio -import torch -from torch import nn -from torch import Tensor -import torch.nn.functional as F -from torch.nn.utils.rnn import pad_sequence - -from .streaming import StreamingModule -from .transformer import create_sin_embedding -from ..data.audio_dataset import SegmentInfo -from ..utils.autocast import TorchAutocast -from ..utils.utils import hash_trick, length_to_mask, collate - - -logger = logging.getLogger(__name__) -TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) -ConditionType = tp.Tuple[Tensor, Tensor] # condition, mask - - -class WavCondition(tp.NamedTuple): - wav: Tensor - length: Tensor - path: tp.List[tp.Optional[str]] = [] - - -def nullify_condition(condition: ConditionType, dim: int = 1): - """This function transforms an input condition to a null condition. 
- The way it is done by converting it to a single zero vector similarly - to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. - - Args: - condition (ConditionType): a tuple of condition and mask (tp.Tuple[Tensor, Tensor]) - dim (int): the dimension that will be truncated (should be the time dimension) - WARNING!: dim should not be the batch dimension! - Returns: - ConditionType: a tuple of null condition and mask - """ - assert dim != 0, "dim cannot be the batch dimension!" - assert type(condition) == tuple and \ - type(condition[0]) == Tensor and \ - type(condition[1]) == Tensor, "'nullify_condition' got an unexpected input type!" - cond, mask = condition - B = cond.shape[0] - last_dim = cond.dim() - 1 - out = cond.transpose(dim, last_dim) - out = 0. * out[..., :1] - out = out.transpose(dim, last_dim) - mask = torch.zeros((B, 1), device=out.device).int() - assert cond.dim() == out.dim() - return out, mask - - -def nullify_wav(wav: Tensor) -> WavCondition: - """Create a nullified WavCondition from a wav tensor with appropriate shape. - - Args: - wav (Tensor): tensor of shape [B, T] - Returns: - WavCondition: wav condition with nullified wav. - """ - null_wav, _ = nullify_condition((wav, torch.zeros_like(wav)), dim=wav.dim() - 1) - return WavCondition( - wav=null_wav, - length=torch.tensor([0] * wav.shape[0], device=wav.device), - path=['null_wav'] * wav.shape[0] - ) - - -@dataclass -class ConditioningAttributes: - text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) - wav: tp.Dict[str, WavCondition] = field(default_factory=dict) - - def __getitem__(self, item): - return getattr(self, item) - - @property - def text_attributes(self): - return self.text.keys() - - @property - def wav_attributes(self): - return self.wav.keys() - - @property - def attributes(self): - return {"text": self.text_attributes, "wav": self.wav_attributes} - - def to_flat_dict(self): - return { - **{f"text.{k}": v for k, v in self.text.items()}, - **{f"wav.{k}": v for k, v in self.wav.items()}, - } - - @classmethod - def from_flat_dict(cls, x): - out = cls() - for k, v in x.items(): - kind, att = k.split(".") - out[kind][att] = v - return out - - -class SegmentWithAttributes(SegmentInfo): - """Base class for all dataclasses that are used for conditioning. - All child classes should implement `to_condition_attributes` that converts - the existing attributes to a dataclass of type ConditioningAttributes. - """ - def to_condition_attributes(self) -> ConditioningAttributes: - raise NotImplementedError() - - -class Tokenizer: - """Base class for all tokenizers - (in case we want to introduce more advances tokenizers in the future). - """ - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: - raise NotImplementedError() - - -class WhiteSpaceTokenizer(Tokenizer): - """This tokenizer should be used for natural language descriptions. 
- For example: - ["he didn't, know he's going home.", 'shorter sentence'] => - [[78, 62, 31, 4, 78, 25, 19, 34], - [59, 77, 0, 0, 0, 0, 0, 0]] - """ - PUNCTUATIONS = "?:!.,;" - - def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", - lemma: bool = True, stopwords: bool = True) -> None: - self.n_bins = n_bins - self.pad_idx = pad_idx - self.lemma = lemma - self.stopwords = stopwords - try: - self.nlp = spacy.load(language) - except IOError: - spacy.cli.download(language) # type: ignore - self.nlp = spacy.load(language) - - @tp.no_type_check - def __call__( - self, - texts: tp.List[tp.Optional[str]], - return_text: bool = False - ) -> tp.Tuple[Tensor, Tensor]: - """Take a list of strings and convert them to a tensor of indices. - - Args: - texts (tp.List[str]): List of strings. - return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. - Returns: - tp.Tuple[Tensor, Tensor]: - - Indices of words in the LUT. - - And a mask indicating where the padding tokens are - """ - output, lengths = [], [] - texts = deepcopy(texts) - for i, text in enumerate(texts): - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(Tensor([self.pad_idx])) - lengths.append(0) - continue - - # convert numbers to words - text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore - # normalize text - text = self.nlp(text) # type: ignore - # remove stopwords - if self.stopwords: - text = [w for w in text if not w.is_stop] # type: ignore - # remove punctuations - text = [w for w in text if w.text not in self.PUNCTUATIONS] # type: ignore - # lemmatize if needed - text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore - - texts[i] = " ".join(text) - lengths.append(len(text)) - # convert to tensor - tokens = Tensor([hash_trick(w, self.n_bins) for w in text]) - output.append(tokens) - - mask = length_to_mask(torch.IntTensor(lengths)).int() - padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() - if return_text: - return padded_output, mask, texts # type: ignore - return padded_output, mask - - -class NoopTokenizer(Tokenizer): - """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. - The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split - strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will - split it to ["Jeff", "Buckley"] and return an index per word. - - For example: - ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] - ["Metal", "Rock", "Classical"] => [0, 223, 51] - """ - def __init__(self, n_bins: int, pad_idx: int = 0): - self.n_bins = n_bins - self.pad_idx = pad_idx - - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: - output, lengths = [], [] - for text in texts: - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(self.pad_idx) - lengths.append(0) - else: - output.append(hash_trick(text, self.n_bins)) - lengths.append(1) - - tokens = torch.LongTensor(output).unsqueeze(1) - mask = length_to_mask(torch.IntTensor(lengths)).int() - return tokens, mask - - -class BaseConditioner(nn.Module): - """Base model for all conditioner modules. We allow the output dim to be different - than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; - 2) make all condition dims consistent. 
- - Args: - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - """ - def __init__(self, dim, output_dim): - super().__init__() - self.dim = dim - self.output_dim = output_dim - self.output_proj = nn.Linear(dim, output_dim) - - def tokenize(self, *args, **kwargs) -> tp.Any: - """Should be any part of the processing that will lead to a synchronization - point, e.g. BPE tokenization with transfer to the GPU. - - The returned value will be saved and return later when calling forward(). - """ - raise NotImplementedError() - - def forward(self, inputs: tp.Any) -> ConditionType: - """Gets input that should be used as conditioning (e.g, genre, description or a waveform). - Outputs a ConditionType, after the input data was embedded as a dense vector. - - Returns: - ConditionType: - - A tensor of size [B, T, D] where B is the batch size, T is the length of the - output embedding and D is the dimension of the embedding. - - And a mask indicating where the padding tokens. - """ - raise NotImplementedError() - - -class TextConditioner(BaseConditioner): - ... - - -class LUTConditioner(TextConditioner): - """Lookup table TextConditioner. - - Args: - n_bins (int): Number of bins. - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - tokenizer (str): Name of the tokenizer. - pad_idx (int, optional): Index for padding token. Defaults to 0. - """ - def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): - super().__init__(dim, output_dim) - self.embed = nn.Embedding(n_bins, dim) - self.tokenizer: Tokenizer - if tokenizer == "whitespace": - self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) - elif tokenizer == "noop": - self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) - else: - raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - device = self.embed.weight.device - tokens, mask = self.tokenizer(x) - tokens, mask = tokens.to(device), mask.to(device) - return tokens, mask - - def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: - tokens, mask = inputs - embeds = self.embed(tokens) - embeds = self.output_proj(embeds) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class T5Conditioner(TextConditioner): - """T5-based TextConditioner. - - Args: - name (str): Name of the T5 model. - output_dim (int): Output dim of the conditioner. - finetune (bool): Whether to fine-tune T5 at train time. - device (str): Device for T5 Conditioner. - autocast_dtype (tp.Optional[str], optional): Autocast dtype. - word_dropout (float, optional): Word dropout probability. - normalize_text (bool, optional): Whether to apply text normalization. 
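-
-        Example (illustrative usage sketch added for clarity; the model name,
-        output dim and input texts below are assumed values, not taken from this file):
-
-            conditioner = T5Conditioner(name="t5-base", output_dim=512,
-                                        finetune=False, device="cpu")
-            # tokenize() runs the T5 tokenizer; None entries are treated as empty strings
-            inputs = conditioner.tokenize(["a calm piano piece", None])
-            # forward() returns (embeds, mask); embeds has shape [B, T, output_dim]
-            embeds, mask = conditioner(inputs)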
- """ - MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", - "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", - "google/flan-t5-xl", "google/flan-t5-xxl"] - MODELS_DIMS = { - "t5-small": 512, - "t5-base": 768, - "t5-large": 1024, - "t5-3b": 1024, - "t5-11b": 1024, - "google/flan-t5-small": 512, - "google/flan-t5-base": 768, - "google/flan-t5-large": 1024, - "google/flan-t5-3b": 1024, - "google/flan-t5-11b": 1024, - } - - def __init__(self, name: str, output_dim: int, finetune: bool, device: str, - autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., - normalize_text: bool = False): - assert name in self.MODELS, f"unrecognized t5 model name (should in {self.MODELS})" - super().__init__(self.MODELS_DIMS[name], output_dim) - self.device = device - self.name = name - self.finetune = finetune - self.word_dropout = word_dropout - - if autocast_dtype is None or self.device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - if self.device != 'cpu': - logger.warning("T5 has no autocast, this might lead to NaN") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # Let's disable logging temporarily because T5 will vomit some errors otherwise. - # thanks https://gist.github.com/simon-weber/7853144 - previous_level = logging.root.manager.disable - logging.disable(logging.ERROR) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - try: - self.t5_tokenizer = T5Tokenizer.from_pretrained(name) - t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) - finally: - logging.disable(previous_level) - if finetune: - self.t5 = t5 - else: - # this makes sure that the t5 models is not part - # of the saved checkpoint - self.__dict__["t5"] = t5.to(device) - - self.normalize_text = normalize_text - if normalize_text: - self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: - # if current sample doesn't have a certain attribute, replace with empty string - entries: tp.List[str] = [xi if xi is not None else "" for xi in x] - if self.normalize_text: - _, _, entries = self.text_normalizer(entries, return_text=True) - if self.word_dropout > 0. and self.training: - new_entries = [] - for entry in entries: - words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] - new_entries.append(" ".join(words)) - entries = new_entries - - empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) - - inputs = self.t5_tokenizer(entries, return_tensors="pt", padding=True).to(self.device) - mask = inputs["attention_mask"] - mask[empty_idx, :] = 0 # zero-out index where the input is non-existant - return inputs - - def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: - mask = inputs["attention_mask"] - with torch.set_grad_enabled(self.finetune), self.autocast: - embeds = self.t5(**inputs).last_hidden_state - embeds = self.output_proj(embeds.to(self.output_proj.weight)) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class WaveformConditioner(BaseConditioner): - """Base class for all conditioners that take a waveform as input. 
- Classes that inherit must implement `_get_wav_embedding` that outputs - a continuous tensor, and `_downsampling_factor` that returns the down-sampling - factor of the embedding model. - - Args: - dim (int): The internal representation dimension. - output_dim (int): Output dimension. - device (tp.Union[torch.device, str]): Device. - """ - def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): - super().__init__(dim, output_dim) - self.device = device - - def tokenize(self, wav_length: WavCondition) -> WavCondition: - wav, length, path = wav_length - assert length is not None - return WavCondition(wav.to(self.device), length.to(self.device), path) - - def _get_wav_embedding(self, wav: Tensor) -> Tensor: - """Gets as input a wav and returns a dense vector of conditions.""" - raise NotImplementedError() - - def _downsampling_factor(self): - """Returns the downsampling factor of the embedding model.""" - raise NotImplementedError() - - def forward(self, inputs: WavCondition) -> ConditionType: - """ - Args: - input (WavCondition): Tuple of (waveform, lengths). - Returns: - ConditionType: Dense vector representing the conditioning along with its' mask. - """ - wav, lengths, path = inputs - with torch.no_grad(): - embeds = self._get_wav_embedding(wav) - embeds = embeds.to(self.output_proj.weight) - embeds = self.output_proj(embeds) - - if lengths is not None: - lengths = lengths / self._downsampling_factor() - mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore - else: - mask = torch.ones_like(embeds) - embeds = (embeds * mask.unsqueeze(2).to(self.device)) - - return embeds, mask - - -class ChromaStemConditioner(WaveformConditioner): - """Chroma conditioner that uses DEMUCS to first filter out drums and bass. The is followed by - the insight the drums and bass often dominate the chroma, leading to the chroma not containing the - information about melody. - - Args: - output_dim (int): Output dimension for the conditioner. - sample_rate (int): Sample rate for the chroma extractor. - n_chroma (int): Number of chroma for the chroma extractor. - radix2_exp (int): Radix2 exponent for the chroma extractor. - duration (float): Duration used during training. This is later used for correct padding - in case we are using chroma as prefix. - match_len_on_eval (bool, optional): If True then all chromas are padded to the training - duration. Defaults to False. - eval_wavs (str, optional): Path to a json egg with waveform, this waveforms are used as - conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). - Defaults to None. - n_eval_wavs (int, optional): Limits the number of waveforms used for conditioning. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for the conditioner. - **kwargs: Additional parameters for the chroma extractor. 
- """ - def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, - duration: float, match_len_on_eval: bool = False, eval_wavs: tp.Optional[str] = None, - n_eval_wavs: int = 0, device: tp.Union[torch.device, str] = "cpu", **kwargs): - from demucs import pretrained - super().__init__(dim=n_chroma, output_dim=output_dim, device=device) - self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) - self.sample_rate = sample_rate - self.match_len_on_eval = match_len_on_eval - self.duration = duration - self.__dict__["demucs"] = pretrained.get_model('htdemucs').to(device) - self.stem2idx = {'drums': 0, 'bass': 1, 'other': 2, 'vocal': 3} - self.stem_idx = torch.LongTensor([self.stem2idx['vocal'], self.stem2idx['other']]).to(device) - self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, - device=device, **kwargs) - self.chroma_len = self._get_chroma_len() - - def _downsampling_factor(self): - return self.chroma.winhop - - def _get_chroma_len(self): - """Get length of chroma during training""" - dummy_wav = torch.zeros((1, self.sample_rate * self.duration), device=self.device) - dummy_chr = self.chroma(dummy_wav) - return dummy_chr.shape[1] - - @torch.no_grad() - def _get_filtered_wav(self, wav): - from demucs.apply import apply_model - from demucs.audio import convert_audio - with self.autocast: - wav = convert_audio(wav, self.sample_rate, self.demucs.samplerate, self.demucs.audio_channels) - stems = apply_model(self.demucs, wav, device=self.device) - stems = stems[:, self.stem_idx] # extract stem - stems = stems.sum(1) # merge extracted stems - stems = stems.mean(1, keepdim=True) # mono - stems = convert_audio(stems, self.demucs.samplerate, self.sample_rate, 1) - return stems - - @torch.no_grad() - def _get_wav_embedding(self, wav): - # avoid 0-size tensors when we are working with null conds - if wav.shape[-1] == 1: - return self.chroma(wav) - stems = self._get_filtered_wav(wav) - chroma = self.chroma(stems) - - if self.match_len_on_eval: - b, t, c = chroma.shape - if t > self.chroma_len: - chroma = chroma[:, :self.chroma_len] - logger.debug(f'chroma was truncated! ({t} -> {chroma.shape[1]})') - elif t < self.chroma_len: - chroma = F.pad(chroma, (0, 0, 0, self.chroma_len - t)) - logger.debug(f'chroma was zero-padded! ({t} -> {chroma.shape[1]})') - return chroma - - -class ChromaExtractor(nn.Module): - """Chroma extraction class, handles chroma extraction and quantization. - - Args: - sample_rate (int): Sample rate. - n_chroma (int): Number of chroma to consider. - radix2_exp (int): Radix2 exponent. - nfft (tp.Optional[int], optional): Number of FFT. - winlen (tp.Optional[int], optional): Window length. - winhop (tp.Optional[int], optional): Window hop size. - argmax (bool, optional): Whether to use argmax. Defaults to False. - norm (float, optional): Norm for chroma normalization. Defaults to inf. - device (tp.Union[torch.device, str], optional): Device to use. Defaults to cpu. 
- """ - def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, - nfft: tp.Optional[int] = None, winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, - argmax: bool = False, norm: float = torch.inf, device: tp.Union[torch.device, str] = "cpu"): - super().__init__() - from librosa import filters - self.device = device - self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) - self.winlen = winlen or 2 ** radix2_exp - self.nfft = nfft or self.winlen - self.winhop = winhop or (self.winlen // 4) - self.sr = sample_rate - self.n_chroma = n_chroma - self.norm = norm - self.argmax = argmax - self.window = torch.hann_window(self.winlen).to(device) - self.fbanks = torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0, - n_chroma=self.n_chroma)).to(device) - self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen, - hop_length=self.winhop, power=2, center=True, - pad=0, normalized=True).to(device) - - def forward(self, wav): - with self.autocast: - T = wav.shape[-1] - # in case we are getting a wav that was dropped out (nullified) - # make sure wav length is no less that nfft - if T < self.nfft: - pad = self.nfft - T - r = 0 if pad % 2 == 0 else 1 - wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0) - assert wav.shape[-1] == self.nfft, f'expected len {self.nfft} but got {wav.shape[-1]}' - spec = self.spec(wav).squeeze(1) - raw_chroma = torch.einsum("cf,...ft->...ct", self.fbanks, spec) - norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6) - norm_chroma = rearrange(norm_chroma, "b d t -> b t d") - - if self.argmax: - idx = norm_chroma.argmax(-1, keepdims=True) - norm_chroma[:] = 0 - norm_chroma.scatter_(dim=-1, index=idx, value=1) - - return norm_chroma - - -def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str): - """Utility function for nullifying an attribute inside an ConditioningAttributes object. - If the condition is of type "wav", then nullify it using "nullify_condition". - If the condition is of any other type, set its' value to None. - Works in-place. - """ - if condition_type not in ["text", "wav"]: - raise ValueError( - "dropout_condition got an unexpected condition type!" - f" expected 'wav' or 'text' but got '{condition_type}'" - ) - - if condition not in getattr(sample, condition_type): - raise ValueError( - "dropout_condition received an unexpected condition!" - f" expected wav={sample.wav.keys()} and text={sample.text.keys()}" - f"but got '{condition}' of type '{condition_type}'!" - ) - - if condition_type == "wav": - wav, length, path = sample.wav[condition] - sample.wav[condition] = nullify_wav(wav) - else: - sample.text[condition] = None - - return sample - - -class DropoutModule(nn.Module): - """Base class for all dropout modules.""" - def __init__(self, seed: int = 1234): - super().__init__() - self.rng = torch.Generator() - self.rng.manual_seed(seed) - - -class AttributeDropout(DropoutModule): - """Applies dropout with a given probability per attribute. This is different from the behavior of - ClassifierFreeGuidanceDropout as this allows for attributes to be dropped out separately. For example, - "artist" can be dropped while "genre" remains. This is in contrast to ClassifierFreeGuidanceDropout - where if "artist" is dropped "genre" must also be dropped. - - Args: - p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example: - ... 
- "genre": 0.1, - "artist": 0.5, - "wav": 0.25, - ... - active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False. - seed (int, optional): Random seed. - """ - def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234): - super().__init__(seed=seed) - self.active_on_eval = active_on_eval - # construct dict that return the values from p otherwise 0 - self.p = {} - for condition_type, probs in p.items(): - self.p[condition_type] = defaultdict(lambda: 0, probs) - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (tp.List[ConditioningAttributes]): List of conditions. - Returns: - tp.List[ConditioningAttributes]: List of conditions after certain attributes were set to None. - """ - if not self.training and not self.active_on_eval: - return samples - - samples = deepcopy(samples) - - for condition_type, ps in self.p.items(): # for condition types [text, wav] - for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre]) - if torch.rand(1, generator=self.rng).item() < p: - for sample in samples: - dropout_condition(sample, condition_type, condition) - - return samples - - def __repr__(self): - return f"AttributeDropout({dict(self.p)})" - - -class ClassifierFreeGuidanceDropout(DropoutModule): - """Applies Classifier Free Guidance dropout, meaning all attributes - are dropped with the same probability. - - Args: - p (float): Probability to apply condition dropout during training. - seed (int): Random seed. - """ - def __init__(self, p: float, seed: int = 1234): - super().__init__(seed=seed) - self.p = p - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (tp.List[ConditioningAttributes]): List of conditions. - Returns: - tp.List[ConditioningAttributes]: List of conditions after all attributes were set to None. - """ - if not self.training: - return samples - - # decide on which attributes to drop in a batched fashion - drop = torch.rand(1, generator=self.rng).item() < self.p - if not drop: - return samples - - # nullify conditions of all attributes - samples = deepcopy(samples) - - for condition_type in ["wav", "text"]: - for sample in samples: - for condition in sample.attributes[condition_type]: - dropout_condition(sample, condition_type, condition) - - return samples - - def __repr__(self): - return f"ClassifierFreeGuidanceDropout(p={self.p})" - - -class ConditioningProvider(nn.Module): - """Main class to provide conditions given all the supported conditioners. - - Args: - conditioners (dict): Dictionary of conditioners. - merge_text_conditions_p (float, optional): Probability to merge all text sources - into a single text condition. Defaults to 0. - drop_desc_p (float, optional): Probability to drop the original description - when merging all text sources into a single text condition. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for conditioners and output condition types. 
- """ - def __init__( - self, - conditioners: tp.Dict[str, BaseConditioner], - merge_text_conditions_p: float = 0, - drop_desc_p: float = 0, - device: tp.Union[torch.device, str] = "cpu", - ): - super().__init__() - self.device = device - self.merge_text_conditions_p = merge_text_conditions_p - self.drop_desc_p = drop_desc_p - self.conditioners = nn.ModuleDict(conditioners) - - @property - def text_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)] - - @property - def wav_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)] - - @property - def has_wav_condition(self): - return len(self.wav_conditions) > 0 - - def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: - """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. - This should be called before starting any real GPU work to avoid synchronization points. - This will return a dict matching conditioner names to their arbitrary tokenized representations. - - Args: - inputs (list[ConditioningAttribres]): List of ConditioningAttributes objects containing - text and wav conditions. - """ - assert all([type(x) == ConditioningAttributes for x in inputs]), \ - "got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]" \ - f" but types were {set([type(x) for x in inputs])}" - - output = {} - text = self._collate_text(inputs) - wavs = self._collate_wavs(inputs) - - assert set(text.keys() | wavs.keys()).issubset(set(self.conditioners.keys())), \ - f"got an unexpected attribute! Expected {self.conditioners.keys()}, got {text.keys(), wavs.keys()}" - - for attribute, batch in chain(text.items(), wavs.items()): - output[attribute] = self.conditioners[attribute].tokenize(batch) - return output - - def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: - """Compute pairs of `(embedding, mask)` using the configured conditioners - and the tokenized representations. The output is for example: - - { - "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), - "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), - ... - } - - Args: - tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. - """ - output = {} - for attribute, inputs in tokenized.items(): - condition, mask = self.conditioners[attribute](inputs) - output[attribute] = (condition, mask) - return output - - def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]: - """Given a list of ConditioningAttributes objects, compile a dictionary where the keys - are the attributes and the values are the aggregated input per attribute. 
- For example: - Input: - [ - ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...), - ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...), - ] - Output: - { - "genre": ["Rock", "Hip-hop"], - "description": ["A rock song with a guitar solo", "A hip-hop verse"] - } - """ - batch_per_attribute: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list) - - def _merge_conds(cond, merge_text_conditions_p=0, drop_desc_p=0): - def is_valid(k, v): - k_valid = k in ['key', 'bpm', 'genre', 'moods', 'instrument'] - v_valid = v is not None and isinstance(v, (int, float, str, list)) - return k_valid and v_valid - - def process_value(v): - if isinstance(v, (int, float, str)): - return v - if isinstance(v, list): - return ", ".join(v) - else: - RuntimeError(f"unknown type for text value! ({type(v), v})") - - desc = cond.text['description'] - meta_data = "" - if random.uniform(0, 1) < merge_text_conditions_p: - meta_pairs = [f'{k}: {process_value(v)}' for k, v in cond.text.items() if is_valid(k, v)] - random.shuffle(meta_pairs) - meta_data = ". ".join(meta_pairs) - desc = desc if not random.uniform(0, 1) < drop_desc_p else None - - if desc is None: - desc = meta_data if len(meta_data) > 1 else None - else: - desc = desc.rstrip('.') + ". " + meta_data - cond.text['description'] = desc.strip() if desc else None - - if self.training and self.merge_text_conditions_p: - for sample in samples: - _merge_conds(sample, self.merge_text_conditions_p, self.drop_desc_p) - - texts = [x.text for x in samples] - for text in texts: - for condition in self.text_conditions: - batch_per_attribute[condition].append(text[condition]) - - return batch_per_attribute - - def _collate_wavs(self, samples: tp.List[ConditioningAttributes]): - """Generate a dict where the keys are attributes by which we fetch similar wavs, - and the values are Tensors of wavs according to said attribtues. - - *Note*: by the time the samples reach this function, each sample should have some waveform - inside the "wav" attribute. It should be either: - 1. A real waveform - 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset) - 3. A null waveform due to it being dropped in a dropout module (nullified by dropout) - - Args: - samples (tp.List[ConditioningAttributes]): List of ConditioningAttributes samples. - Returns: - dict: A dicionary mapping an attribute name to wavs. - """ - wavs = defaultdict(list) - lens = defaultdict(list) - paths = defaultdict(list) - out = {} - - for sample in samples: - for attribute in self.wav_conditions: - wav, length, path = sample.wav[attribute] - wavs[attribute].append(wav.flatten()) - lens[attribute].append(length) - paths[attribute].append(path) - - # stack all wavs to a single tensor - for attribute in self.wav_conditions: - stacked_wav, _ = collate(wavs[attribute], dim=0) - out[attribute] = WavCondition(stacked_wav.unsqueeze(1), - torch.cat(lens['self_wav']), paths[attribute]) # type: ignore - - return out - - -class ConditionFuser(StreamingModule): - """Condition fuser handles the logic to combine the different conditions - to the actual model input. - - Args: - fuse2cond (tp.Dict[str, str]): A dictionary that says how to fuse - each condition. For example: - { - "prepend": ["description"], - "sum": ["genre", "bpm"], - "cross": ["description"], - } - cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention. 
- cross_attention_pos_emb_scale (int): Scale for positional embeddings in cross attention if used. - """ - FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"] - - def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False, - cross_attention_pos_emb_scale: float = 1.0): - super().__init__() - assert all( - [k in self.FUSING_METHODS for k in fuse2cond.keys()] - ), f"got invalid fuse method, allowed methods: {self.FUSING_MEHTODS}" - self.cross_attention_pos_emb = cross_attention_pos_emb - self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale - self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond - self.cond2fuse: tp.Dict[str, str] = {} - for fuse_method, conditions in fuse2cond.items(): - for condition in conditions: - self.cond2fuse[condition] = fuse_method - - def forward( - self, - input: Tensor, - conditions: tp.Dict[str, ConditionType] - ) -> tp.Tuple[Tensor, tp.Optional[Tensor]]: - """Fuse the conditions to the provided model input. - - Args: - input (Tensor): Transformer input. - conditions (tp.Dict[str, ConditionType]): Dict of conditions. - Returns: - tp.Tuple[Tensor, Tensor]: The first tensor is the transformer input - after the conditions have been fused. The second output tensor is the tensor - used for cross-attention or None if no cross attention inputs exist. - """ - B, T, _ = input.shape - - if 'offsets' in self._streaming_state: - first_step = False - offsets = self._streaming_state['offsets'] - else: - first_step = True - offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device) - - assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \ - f"given conditions contain unknown attributes for fuser, " \ - f"expected {self.cond2fuse.keys()}, got {conditions.keys()}" - cross_attention_output = None - for cond_type, (cond, cond_mask) in conditions.items(): - op = self.cond2fuse[cond_type] - if op == "sum": - input += cond - elif op == "input_interpolate": - cond = rearrange(cond, "b t d -> b d t") - cond = F.interpolate(cond, size=input.shape[1]) - input += rearrange(cond, "b d t -> b t d") - elif op == "prepend": - if first_step: - input = torch.cat([cond, input], dim=1) - elif op == "cross": - if cross_attention_output is not None: - cross_attention_output = torch.cat([cross_attention_output, cond], dim=1) - else: - cross_attention_output = cond - else: - raise ValueError(f"unknown op ({op})") - - if self.cross_attention_pos_emb and cross_attention_output is not None: - positions = torch.arange( - cross_attention_output.shape[1], - device=cross_attention_output.device - ).view(1, -1, 1) - pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1]) - cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return input, cross_attention_output diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/express/lib/request.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/express/lib/request.js deleted file mode 100644 index 3f1eeca6c1ac502dfb12ae0144a3a5cced4f34a3..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/express/lib/request.js +++ /dev/null @@ -1,525 +0,0 @@ -/*! 
- * express - * Copyright(c) 2009-2013 TJ Holowaychuk - * Copyright(c) 2013 Roman Shtylman - * Copyright(c) 2014-2015 Douglas Christopher Wilson - * MIT Licensed - */ - -'use strict'; - -/** - * Module dependencies. - * @private - */ - -var accepts = require('accepts'); -var deprecate = require('depd')('express'); -var isIP = require('net').isIP; -var typeis = require('type-is'); -var http = require('http'); -var fresh = require('fresh'); -var parseRange = require('range-parser'); -var parse = require('parseurl'); -var proxyaddr = require('proxy-addr'); - -/** - * Request prototype. - * @public - */ - -var req = Object.create(http.IncomingMessage.prototype) - -/** - * Module exports. - * @public - */ - -module.exports = req - -/** - * Return request header. - * - * The `Referrer` header field is special-cased, - * both `Referrer` and `Referer` are interchangeable. - * - * Examples: - * - * req.get('Content-Type'); - * // => "text/plain" - * - * req.get('content-type'); - * // => "text/plain" - * - * req.get('Something'); - * // => undefined - * - * Aliased as `req.header()`. - * - * @param {String} name - * @return {String} - * @public - */ - -req.get = -req.header = function header(name) { - if (!name) { - throw new TypeError('name argument is required to req.get'); - } - - if (typeof name !== 'string') { - throw new TypeError('name must be a string to req.get'); - } - - var lc = name.toLowerCase(); - - switch (lc) { - case 'referer': - case 'referrer': - return this.headers.referrer - || this.headers.referer; - default: - return this.headers[lc]; - } -}; - -/** - * To do: update docs. - * - * Check if the given `type(s)` is acceptable, returning - * the best match when true, otherwise `undefined`, in which - * case you should respond with 406 "Not Acceptable". - * - * The `type` value may be a single MIME type string - * such as "application/json", an extension name - * such as "json", a comma-delimited list such as "json, html, text/plain", - * an argument list such as `"json", "html", "text/plain"`, - * or an array `["json", "html", "text/plain"]`. When a list - * or array is given, the _best_ match, if any is returned. - * - * Examples: - * - * // Accept: text/html - * req.accepts('html'); - * // => "html" - * - * // Accept: text/*, application/json - * req.accepts('html'); - * // => "html" - * req.accepts('text/html'); - * // => "text/html" - * req.accepts('json, text'); - * // => "json" - * req.accepts('application/json'); - * // => "application/json" - * - * // Accept: text/*, application/json - * req.accepts('image/png'); - * req.accepts('png'); - * // => undefined - * - * // Accept: text/*;q=.5, application/json - * req.accepts(['html', 'json']); - * req.accepts('html', 'json'); - * req.accepts('html, json'); - * // => "json" - * - * @param {String|Array} type(s) - * @return {String|Array|Boolean} - * @public - */ - -req.accepts = function(){ - var accept = accepts(this); - return accept.types.apply(accept, arguments); -}; - -/** - * Check if the given `encoding`s are accepted. - * - * @param {String} ...encoding - * @return {String|Array} - * @public - */ - -req.acceptsEncodings = function(){ - var accept = accepts(this); - return accept.encodings.apply(accept, arguments); -}; - -req.acceptsEncoding = deprecate.function(req.acceptsEncodings, - 'req.acceptsEncoding: Use acceptsEncodings instead'); - -/** - * Check if the given `charset`s are acceptable, - * otherwise you should respond with 406 "Not Acceptable". 
- * - * @param {String} ...charset - * @return {String|Array} - * @public - */ - -req.acceptsCharsets = function(){ - var accept = accepts(this); - return accept.charsets.apply(accept, arguments); -}; - -req.acceptsCharset = deprecate.function(req.acceptsCharsets, - 'req.acceptsCharset: Use acceptsCharsets instead'); - -/** - * Check if the given `lang`s are acceptable, - * otherwise you should respond with 406 "Not Acceptable". - * - * @param {String} ...lang - * @return {String|Array} - * @public - */ - -req.acceptsLanguages = function(){ - var accept = accepts(this); - return accept.languages.apply(accept, arguments); -}; - -req.acceptsLanguage = deprecate.function(req.acceptsLanguages, - 'req.acceptsLanguage: Use acceptsLanguages instead'); - -/** - * Parse Range header field, capping to the given `size`. - * - * Unspecified ranges such as "0-" require knowledge of your resource length. In - * the case of a byte range this is of course the total number of bytes. If the - * Range header field is not given `undefined` is returned, `-1` when unsatisfiable, - * and `-2` when syntactically invalid. - * - * When ranges are returned, the array has a "type" property which is the type of - * range that is required (most commonly, "bytes"). Each array element is an object - * with a "start" and "end" property for the portion of the range. - * - * The "combine" option can be set to `true` and overlapping & adjacent ranges - * will be combined into a single range. - * - * NOTE: remember that ranges are inclusive, so for example "Range: users=0-3" - * should respond with 4 users when available, not 3. - * - * @param {number} size - * @param {object} [options] - * @param {boolean} [options.combine=false] - * @return {number|array} - * @public - */ - -req.range = function range(size, options) { - var range = this.get('Range'); - if (!range) return; - return parseRange(size, range, options); -}; - -/** - * Return the value of param `name` when present or `defaultValue`. - * - * - Checks route placeholders, ex: _/user/:id_ - * - Checks body params, ex: id=12, {"id":12} - * - Checks query string params, ex: ?id=12 - * - * To utilize request bodies, `req.body` - * should be an object. This can be done by using - * the `bodyParser()` middleware. - * - * @param {String} name - * @param {Mixed} [defaultValue] - * @return {String} - * @public - */ - -req.param = function param(name, defaultValue) { - var params = this.params || {}; - var body = this.body || {}; - var query = this.query || {}; - - var args = arguments.length === 1 - ? 'name' - : 'name, default'; - deprecate('req.param(' + args + '): Use req.params, req.body, or req.query instead'); - - if (null != params[name] && params.hasOwnProperty(name)) return params[name]; - if (null != body[name]) return body[name]; - if (null != query[name]) return query[name]; - - return defaultValue; -}; - -/** - * Check if the incoming request contains the "Content-Type" - * header field, and it contains the given mime `type`. - * - * Examples: - * - * // With Content-Type: text/html; charset=utf-8 - * req.is('html'); - * req.is('text/html'); - * req.is('text/*'); - * // => true - * - * // When Content-Type is application/json - * req.is('json'); - * req.is('application/json'); - * req.is('application/*'); - * // => true - * - * req.is('html'); - * // => false - * - * @param {String|Array} types... 
- * @return {String|false|null} - * @public - */ - -req.is = function is(types) { - var arr = types; - - // support flattened arguments - if (!Array.isArray(types)) { - arr = new Array(arguments.length); - for (var i = 0; i < arr.length; i++) { - arr[i] = arguments[i]; - } - } - - return typeis(this, arr); -}; - -/** - * Return the protocol string "http" or "https" - * when requested with TLS. When the "trust proxy" - * setting trusts the socket address, the - * "X-Forwarded-Proto" header field will be trusted - * and used if present. - * - * If you're running behind a reverse proxy that - * supplies https for you this may be enabled. - * - * @return {String} - * @public - */ - -defineGetter(req, 'protocol', function protocol(){ - var proto = this.connection.encrypted - ? 'https' - : 'http'; - var trust = this.app.get('trust proxy fn'); - - if (!trust(this.connection.remoteAddress, 0)) { - return proto; - } - - // Note: X-Forwarded-Proto is normally only ever a - // single value, but this is to be safe. - var header = this.get('X-Forwarded-Proto') || proto - var index = header.indexOf(',') - - return index !== -1 - ? header.substring(0, index).trim() - : header.trim() -}); - -/** - * Short-hand for: - * - * req.protocol === 'https' - * - * @return {Boolean} - * @public - */ - -defineGetter(req, 'secure', function secure(){ - return this.protocol === 'https'; -}); - -/** - * Return the remote address from the trusted proxy. - * - * The is the remote address on the socket unless - * "trust proxy" is set. - * - * @return {String} - * @public - */ - -defineGetter(req, 'ip', function ip(){ - var trust = this.app.get('trust proxy fn'); - return proxyaddr(this, trust); -}); - -/** - * When "trust proxy" is set, trusted proxy addresses + client. - * - * For example if the value were "client, proxy1, proxy2" - * you would receive the array `["client", "proxy1", "proxy2"]` - * where "proxy2" is the furthest down-stream and "proxy1" and - * "proxy2" were trusted. - * - * @return {Array} - * @public - */ - -defineGetter(req, 'ips', function ips() { - var trust = this.app.get('trust proxy fn'); - var addrs = proxyaddr.all(this, trust); - - // reverse the order (to farthest -> closest) - // and remove socket address - addrs.reverse().pop() - - return addrs -}); - -/** - * Return subdomains as an array. - * - * Subdomains are the dot-separated parts of the host before the main domain of - * the app. By default, the domain of the app is assumed to be the last two - * parts of the host. This can be changed by setting "subdomain offset". - * - * For example, if the domain is "tobi.ferrets.example.com": - * If "subdomain offset" is not set, req.subdomains is `["ferrets", "tobi"]`. - * If "subdomain offset" is 3, req.subdomains is `["tobi"]`. - * - * @return {Array} - * @public - */ - -defineGetter(req, 'subdomains', function subdomains() { - var hostname = this.hostname; - - if (!hostname) return []; - - var offset = this.app.get('subdomain offset'); - var subdomains = !isIP(hostname) - ? hostname.split('.').reverse() - : [hostname]; - - return subdomains.slice(offset); -}); - -/** - * Short-hand for `url.parse(req.url).pathname`. - * - * @return {String} - * @public - */ - -defineGetter(req, 'path', function path() { - return parse(this).pathname; -}); - -/** - * Parse the "Host" header field to a hostname. - * - * When the "trust proxy" setting trusts the socket - * address, the "X-Forwarded-Host" header field will - * be trusted. 
- * - * @return {String} - * @public - */ - -defineGetter(req, 'hostname', function hostname(){ - var trust = this.app.get('trust proxy fn'); - var host = this.get('X-Forwarded-Host'); - - if (!host || !trust(this.connection.remoteAddress, 0)) { - host = this.get('Host'); - } else if (host.indexOf(',') !== -1) { - // Note: X-Forwarded-Host is normally only ever a - // single value, but this is to be safe. - host = host.substring(0, host.indexOf(',')).trimRight() - } - - if (!host) return; - - // IPv6 literal support - var offset = host[0] === '[' - ? host.indexOf(']') + 1 - : 0; - var index = host.indexOf(':', offset); - - return index !== -1 - ? host.substring(0, index) - : host; -}); - -// TODO: change req.host to return host in next major - -defineGetter(req, 'host', deprecate.function(function host(){ - return this.hostname; -}, 'req.host: Use req.hostname instead')); - -/** - * Check if the request is fresh, aka - * Last-Modified and/or the ETag - * still match. - * - * @return {Boolean} - * @public - */ - -defineGetter(req, 'fresh', function(){ - var method = this.method; - var res = this.res - var status = res.statusCode - - // GET or HEAD for weak freshness validation only - if ('GET' !== method && 'HEAD' !== method) return false; - - // 2xx or 304 as per rfc2616 14.26 - if ((status >= 200 && status < 300) || 304 === status) { - return fresh(this.headers, { - 'etag': res.get('ETag'), - 'last-modified': res.get('Last-Modified') - }) - } - - return false; -}); - -/** - * Check if the request is stale, aka - * "Last-Modified" and / or the "ETag" for the - * resource has changed. - * - * @return {Boolean} - * @public - */ - -defineGetter(req, 'stale', function stale(){ - return !this.fresh; -}); - -/** - * Check if the request was an _XMLHttpRequest_. - * - * @return {Boolean} - * @public - */ - -defineGetter(req, 'xhr', function xhr(){ - var val = this.get('X-Requested-With') || ''; - return val.toLowerCase() === 'xmlhttprequest'; -}); - -/** - * Helper function for creating a getter on an object. 
- * - * @param {Object} obj - * @param {String} name - * @param {Function} getter - * @private - */ -function defineGetter(obj, name, getter) { - Object.defineProperty(obj, name, { - configurable: true, - enumerable: true, - get: getter - }); -} diff --git a/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/evaluation/losses/__init__.py b/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/evaluation/losses/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/fffiloni/spectrogram-to-music/style.css b/spaces/fffiloni/spectrogram-to-music/style.css deleted file mode 100644 index a03bcc7436bcbb3eb4661c9e79445530473e137d..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/spectrogram-to-music/style.css +++ /dev/null @@ -1,55 +0,0 @@ -#col-container, #col-container-2 {max-width: 510px; margin-left: auto; margin-right: auto;} -a {text-decoration-line: underline; font-weight: 600;} -div#record_btn > .mt-6 { - margin-top: 0!important; -} -div#record_btn > .mt-6 button { - width: 100%; - height: 40px; -} -.footer { - margin-bottom: 45px; - margin-top: 10px; - text-align: center; - border-bottom: 1px solid #e5e5e5; -} -.footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; -} -.dark .footer { - border-color: #303030; -} -.dark .footer>p { - background: #0b0f19; -} -.animate-spin { - animation: spin 1s linear infinite; -} -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} -#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; -} -#share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} diff --git a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_6.py b/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_6.py deleted file mode 100644 index 5eba13d72ad90b0985ada22863247815f1f9608a..0000000000000000000000000000000000000000 --- a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_6.py +++ /dev/null @@ -1,26 +0,0 @@ - -import re - -def is_spam(message): - # Check for excessive use of special characters - special_chars = re.findall(r'[\*\.\(\)\\\-/@\[\]<>]', message) - if len(special_chars) > 20: - return True - - # Check for excessive use of numbers - numbers = re.findall(r'\d+', message) - if len(numbers) > 15: - return True - - # Check for pattern of shortened urls - urls = re.findall(r'(https?://[a-zA-Z0-9./]+)', message) - if len(urls) > 5: - return True - - # Check for presence of keywords in the message - keywords = ['상한가', '추천', '입장', '무료'] - for keyword in keywords: - if keyword in message: - return True - - return False diff --git a/spaces/flax-community/clip-reply-demo/model/model.py b/spaces/flax-community/clip-reply-demo/model/model.py deleted file mode 100644 index b1f5b8c8b2d90852df93fc94a925da2d907ffe2e..0000000000000000000000000000000000000000 --- a/spaces/flax-community/clip-reply-demo/model/model.py +++ 
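As a quick sanity check on the is_spam heuristic from funcs/f_6.py deleted above, the sketch below runs it on two invented messages. The import path mirrors the directory shown in the diff and the sample strings are assumptions, not content from the original repository.

from funcs.f_6 import is_spam  # assumed import path, matching the folder layout in the diff

# A plain message: no flagged keywords, few special characters, no digits, no URLs.
print(is_spam("Lunch at noon?"))  # expected: False

# A message with a flagged keyword ("무료") and more than five shortened URLs.
print(is_spam("무료 입장! http://a.bc http://d.ef http://g.hi "
              "http://j.kl http://m.no http://p.qr"))  # expected: True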
/dev/null @@ -1,471 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Optional, Tuple - -import flax.linen as nn -import jax -import jax.numpy as jnp -from flax.core.frozen_dict import FrozenDict -from transformers import FLAX_MODEL_MAPPING, FlaxCLIPVisionModel -from transformers.modeling_flax_utils import FlaxPreTrainedModel -from transformers.models.clip.modeling_flax_clip import FlaxCLIPOutput -from transformers.utils import logging - -from .config import HybridCLIPConfig - -logger = logging.get_logger(__name__) - - -class FlaxHybridCLIPModule(nn.Module): - config: HybridCLIPConfig - dtype: jnp.dtype = jnp.float32 - - def setup(self): - text_config = self.config.text_config - vision_config = self.config.vision_config - - self.projection_dim = self.config.projection_dim - self.text_embed_dim = text_config.hidden_size - self.vision_embed_dim = vision_config.hidden_size - - text_module = FLAX_MODEL_MAPPING[self.config.text_config.__class__].module_class - vision_module = FLAX_MODEL_MAPPING.get( - self.config.vision_config.__class__, FlaxCLIPVisionModel - ).module_class - - self.text_model = text_module(text_config, dtype=self.dtype) - self.vision_model = vision_module(vision_config, dtype=self.dtype) - - self.visual_projection = nn.Dense( - self.projection_dim, - dtype=self.dtype, - kernel_init=jax.nn.initializers.normal(0.02, dtype=self.dtype), - use_bias=False, - ) - self.text_projection = nn.Dense( - self.projection_dim, - dtype=self.dtype, - kernel_init=jax.nn.initializers.normal(0.02, dtype=self.dtype), - use_bias=False, - ) - self.logit_scale = self.param("logit_scale", jax.nn.initializers.ones, []) - - def __call__( - self, - input_ids=None, - pixel_values=None, - attention_mask=None, - position_ids=None, - token_type_ids=None, - deterministic: bool = True, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): - return_dict = ( - return_dict if return_dict is not None else self.config.return_dict - ) - - vision_outputs = self.vision_model( - pixel_values=pixel_values, - deterministic=deterministic, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - text_outputs = self.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - deterministic=deterministic, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - image_embeds = vision_outputs[1] - image_embeds = self.visual_projection(image_embeds) - - text_embeds = text_outputs[1] - text_embeds = self.text_projection(text_embeds) - - # normalized features - image_embeds = image_embeds / jnp.linalg.norm( - image_embeds, axis=-1, keepdims=True - ) - text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True) - - # cosine similarity as logits - logit_scale = jnp.exp(self.logit_scale) - 
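Read on its own, the similarity step at this point of FlaxHybridCLIPModule.__call__ reduces to a small standalone computation. The sketch below uses an assumed batch size, embedding width, and dummy values purely for illustration.

import jax.numpy as jnp

# Dummy, already L2-normalized text and image embeddings (assumed shape: batch=4, dim=8).
text_embeds = jnp.ones((4, 8)) / jnp.sqrt(8.0)
image_embeds = jnp.ones((4, 8)) / jnp.sqrt(8.0)

# exp() of the learned logit_scale parameter acts as a temperature on the cosine similarities.
logit_scale = jnp.exp(jnp.zeros(()))

logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale  # shape (4, 4)
logits_per_image = logits_per_text.T
print(logits_per_image.shape)  # (4, 4)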
logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale - logits_per_image = logits_per_text.T - - if not return_dict: - return ( - logits_per_image, - logits_per_text, - text_embeds, - image_embeds, - text_outputs, - vision_outputs, - ) - - return FlaxCLIPOutput( - logits_per_image=logits_per_image, - logits_per_text=logits_per_text, - text_embeds=text_embeds, - image_embeds=image_embeds, - text_model_output=text_outputs, - vision_model_output=vision_outputs, - ) - - -class FlaxHybridCLIP(FlaxPreTrainedModel): - config_class = HybridCLIPConfig - module_class = FlaxHybridCLIPModule - - def __init__( - self, - config: HybridCLIPConfig, - input_shape: Optional[Tuple] = None, - seed: int = 0, - dtype: jnp.dtype = jnp.float32, - **kwargs, - ): - if input_shape is None: - input_shape = ( - (1, 1), - ( - 1, - config.vision_config.image_size, - config.vision_config.image_size, - 3, - ), - ) - kwargs.pop('_do_init', None) # temp fix possibly related: https://github.com/huggingface/transformers/issues/15766 - module = self.module_class(config=config, dtype=dtype, **kwargs) - super().__init__( - config, module, input_shape=input_shape, seed=seed, dtype=dtype - ) - - def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict: - # init input tensor - input_ids = jnp.zeros(input_shape[0], dtype="i4") - position_ids = jnp.broadcast_to( - jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape[0] - ) - token_type_ids = jnp.ones_like(input_ids) - attention_mask = jnp.ones_like(input_ids) - - pixel_values = jax.random.normal(rng, input_shape[1]) - - params_rng, dropout_rng = jax.random.split(rng) - rngs = {"params": params_rng, "dropout": dropout_rng} - - return self.module.init( - rngs, input_ids, pixel_values, attention_mask, position_ids, token_type_ids - )["params"] - - def __call__( - self, - input_ids, - pixel_values, - attention_mask=None, - position_ids=None, - token_type_ids=None, - params: dict = None, - dropout_rng: jax.random.PRNGKey = None, - train: bool = False, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ): - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.return_dict - ) - - if position_ids is None: - position_ids = jnp.broadcast_to( - jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape - ) - - if token_type_ids is None: - token_type_ids = jnp.zeros_like(input_ids) - - if attention_mask is None: - attention_mask = jnp.ones_like(input_ids) - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - return self.module.apply( - {"params": params or self.params}, - jnp.array(input_ids, dtype="i4"), - jnp.array(pixel_values, dtype=jnp.float32), - jnp.array(attention_mask, dtype="i4"), - jnp.array(position_ids, dtype="i4"), - jnp.array(token_type_ids, dtype="i4"), - not train, - output_attentions, - output_hidden_states, - return_dict, - rngs=rngs, - ) - - def get_text_features( - self, - input_ids, - attention_mask=None, - position_ids=None, - token_type_ids=None, - dropout_rng: jax.random.PRNGKey = None, - train=False, - ): - r""" - Args: - input_ids (:obj:`numpy.ndarray` of shape :obj:`(batch_size, sequence_length)`): - Indices of 
input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. - Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See - :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` - for details. - `What are input IDs? <../glossary.html#input-ids>`__ - Returns: - text_features (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, output_dim`): The text embeddings - obtained by applying the projection layer to the pooled output of text model. - """ - if position_ids is None: - position_ids = jnp.broadcast_to( - jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape - ) - - if token_type_ids is None: - token_type_ids = jnp.zeros_like(input_ids) - - if attention_mask is None: - attention_mask = jnp.ones_like(input_ids) - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - def _get_features( - module, - input_ids, - attention_mask, - position_ids, - token_type_ids, - deterministic, - ): - text_outputs = module.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - token_type_ids=token_type_ids, - deterministic=deterministic, - ) - pooled_output = text_outputs[1] - text_features = module.text_projection(pooled_output) - return text_features - - return self.module.apply( - {"params": self.params}, - jnp.array(input_ids, dtype="i4"), - jnp.array(attention_mask, dtype="i4"), - jnp.array(position_ids, dtype="i4"), - jnp.array(token_type_ids, dtype="i4"), - not train, - method=_get_features, - rngs=rngs, - ) - - def get_image_features( - self, pixel_values, dropout_rng: jax.random.PRNGKey = None, train=False - ): - r""" - Args: - pixel_values (:obj:`numpy.ndarray` of shape :obj:`(batch_size, num_channels, height, width)`): - Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained - using :class:`~transformers.ImageFeatureExtractionMixin`. See - :meth:`transformers.ImageFeatureExtractionMixin.__call__` for details. - Returns: - image_features (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, output_dim`): The image embeddings - obtained by applying the projection layer to the pooled output of vision model. - """ - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - def _get_features(module, pixel_values, deterministic): - vision_outputs = module.vision_model( - pixel_values=pixel_values, deterministic=deterministic - ) - pooled_output = vision_outputs[1] # pooled_output - image_features = module.visual_projection(pooled_output) - return image_features - - return self.module.apply( - {"params": self.params}, - jnp.array(pixel_values, dtype=jnp.float32), - not train, - method=_get_features, - rngs=rngs, - ) - - @classmethod - def from_text_vision_pretrained( - cls, - text_model_name_or_path: str = None, - vision_model_name_or_path: str = None, - *model_args, - **kwargs, - ) -> FlaxPreTrainedModel: - """ - Params: - text_model_name_or_path (:obj: `str`, `optional`): - Information necessary to initiate the text model. Can be either: - - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. - Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under - a user or organization name, like ``dbmdz/bert-base-german-cased``. 
- - A path to a `directory` containing model weights saved using - :func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - - A path or url to a `PyTorch checkpoint folder` (e.g, ``./pt_model``). In - this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided - as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in - a Flax model using the provided conversion scripts and loading the Flax model afterwards. - vision_model_name_or_path (:obj: `str`, `optional`, defaults to `None`): - Information necessary to initiate the vision model. Can be either: - - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. - Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under - a user or organization name, like ``dbmdz/bert-base-german-cased``. - - A path to a `directory` containing model weights saved using - :func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - - A path or url to a `PyTorch checkpoint folder` (e.g, ``./pt_model``). In - this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided - as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in - a Flax model using the provided conversion scripts and loading the Flax model afterwards. - model_args (remaining positional arguments, `optional`): - All remaning positional arguments will be passed to the underlying model's ``__init__`` method. - kwargs (remaining dictionary of keyword arguments, `optional`): - Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., - :obj:`output_attentions=True`). - - To update the text configuration, use the prefix `text_` for each configuration parameter. - - To update the vision configuration, use the prefix `vision_` for each configuration parameter. - - To update the parent model configuration, do not use a prefix for each configuration parameter. - Behaves differently depending on whether a :obj:`config` is provided or automatically loaded. - Example:: - >>> from transformers import FlaxHybridCLIP - >>> # initialize a model from pretrained BERT and CLIP models. Note that the projection layers will be randomly initialized. 
- >>> # If using CLIP's vision model the vision projection layer will be initialized using pre-trained weights - >>> model = FlaxHybridCLIP.from_text_vision_pretrained('bert-base-uncased', 'openai/clip-vit-base-patch32') - >>> # saving model after fine-tuning - >>> model.save_pretrained("./bert-clip") - >>> # load fine-tuned model - >>> model = FlaxHybridCLIP.from_pretrained("./bert-clip") - """ - - kwargs_text = { - argument[len("text_") :]: value - for argument, value in kwargs.items() - if argument.startswith("text_") - } - - kwargs_vision = { - argument[len("vision_") :]: value - for argument, value in kwargs.items() - if argument.startswith("vision_") - } - - # remove text, vision kwargs from kwargs - for key in kwargs_text.keys(): - del kwargs["text_" + key] - for key in kwargs_vision.keys(): - del kwargs["vision_" + key] - - # Load and initialize the text and vision model - text_model = kwargs_text.pop("model", None) - if text_model is None: - assert ( - text_model_name_or_path is not None - ), "If `model` is not defined as an argument, a `text_model_name_or_path` has to be defined" - from transformers import FlaxAutoModel - - if "config" not in kwargs_text: - from transformers import AutoConfig - - text_config = AutoConfig.from_pretrained(text_model_name_or_path) - kwargs_text["config"] = text_config - - text_model = FlaxAutoModel.from_pretrained( - text_model_name_or_path, *model_args, **kwargs_text - ) - - vision_model = kwargs_vision.pop("model", None) - if vision_model is None: - assert ( - vision_model_name_or_path is not None - ), "If `model` is not defined as an argument, a `vision_model_name_or_path` has to be defined" - from transformers import FlaxAutoModel - - if "config" not in kwargs_vision: - from transformers import AutoConfig - - vision_config = AutoConfig.from_pretrained(vision_model_name_or_path) - kwargs_vision["config"] = vision_config - - vision_model = FlaxAutoModel.from_pretrained( - vision_model_name_or_path, *model_args, **kwargs_vision - ) - - # instantiate config with corresponding kwargs - dtype = kwargs.pop("dtype", jnp.float32) - config = HybridCLIPConfig.from_text_vision_configs( - text_model.config, vision_model.config, **kwargs - ) - - # init model - model = cls(config, *model_args, dtype=dtype, **kwargs) - - if vision_config.model_type == "clip": - model.params["vision_model"]["vision_model"] = vision_model.params[ - "vision_model" - ] - model.params["visual_projection"]["kernel"] = vision_model.params[ - "visual_projection" - ]["kernel"] - else: - model.params["vision_model"] = vision_model.params - - model.params["text_model"] = text_model.params - - return model diff --git a/spaces/flax-community/roberta-hindi/multiapp.py b/spaces/flax-community/roberta-hindi/multiapp.py deleted file mode 100644 index e4220a4e68ac3ba288ee68c19e175c2e149d6e81..0000000000000000000000000000000000000000 --- a/spaces/flax-community/roberta-hindi/multiapp.py +++ /dev/null @@ -1,15 +0,0 @@ -import streamlit as st - - -class MultiApp: - def __init__(self): - self.apps = [] - - def add_app(self, title, func): - self.apps.append({"title": title, "function": func}) - - def run(self): - st.sidebar.header("Navigation") - app = st.sidebar.radio("", self.apps, format_func=lambda app: app["title"]) - - app["function"]() diff --git a/spaces/flowers-team/SocialAISchool/data_analysis.py b/spaces/flowers-team/SocialAISchool/data_analysis.py deleted file mode 100644 index 86da73fb75be76272519a6d0bf5f7fb6b3958190..0000000000000000000000000000000000000000 --- 
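The MultiApp helper deleted just above is easiest to read next to a minimal usage sketch. The page names and bodies below are invented, and the import assumes the class is available as in the original multiapp.py.

import streamlit as st
from multiapp import MultiApp  # assumed import of the MultiApp class shown above

def home():
    st.title("Home")
    st.write("Landing page.")

def demo():
    st.title("Demo")
    st.write("Model demo page.")

app = MultiApp()
app.add_app("Home", home)  # each entry becomes one option in the sidebar radio
app.add_app("Demo", demo)
app.run()                  # renders the sidebar and calls the selected page function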
a/spaces/flowers-team/SocialAISchool/data_analysis.py +++ /dev/null @@ -1,1650 +0,0 @@ -#!/usr/bin/env python -import re -import itertools -import math -from itertools import chain -import time - -# import seaborn -import numpy as np -import os -from collections import OrderedDict, defaultdict -import pandas as pd -import matplotlib.pyplot as plt -import sys -from termcolor import cprint, colored -from pathlib import Path -import pickle - -eval_metric = "test_success_rates" -# eval_metric = "exploration_bonus_mean" - -super_title = "" -# super_title = "PPO - No exploration bonus" -# super_title = "Count Based exploration bonus (Grid Search)" -# super_title = "PPO + RND" -# super_title = "PPO + RIDE" - -agg_title = "" - -color_dict = None -eval_filename = None - -max_frames = 20_000_000 - -draw_legend = True -per_seed = False -study_eval = True - -plot_train = True -plot_test = True - -plot_aggregated_test = False -plot_only_aggregated_test = False - - -train_inc_font = 3 - -xnbins = 4 -ynbins = 3 - -steps_denom = 1e6 - -# Global vas for tracking and labeling data at load time. -exp_idx = 0 -label_parser_dict = None -label_parser = lambda l, _, label_parser_dict: l - -# smooth_factor = 100 -smooth_factor = 10 -smooth_factor = 0 -print("smooth factor:", smooth_factor) -eval_smooth_factor = 1 -leg_size = 30 - -def smooth(x_, n=50): - if type(x_) == list: - x_ = np.array(x_) - return np.array([x_[max(i - n, 0):i + 1].mean() for i in range(len(x_))]) - -sort_test = False -def sort_test_set(env_name): - helps = [ - "LanguageFeedback", - "LanguageColor", - "Pointing", - "Emulation", - ] - problems = [ - "Boxes", - "Switches", - "Generators", - "Marble", - "Doors", - "Levers", - ] - - env_names = [] - for p in problems: - for h in helps: - env_names.append(h+p) - - env_names.extend([ - "LeverDoorColl", - "MarblePushColl", - "MarblePassColl", - "AppleStealing" - ]) - - for i, en in enumerate(env_names): - if en in env_name: - return i - - raise ValueError(f"Test env {env_name} not known") - - - -subsample_step = 1 -load_subsample_step = 1 - -x_lim = 0 -max_x_lim = 17 -max_x_lim = np.inf -# x_lim = 100 - -summary_dict = {} -summary_dict_colors = {} - - -# default_colors = ["blue","orange","green","magenta", "brown", "red",'black',"grey",u'#ff7f0e', -# "cyan", "pink",'purple', u'#1f77b4', -# "darkorchid","sienna","lightpink", "indigo","mediumseagreen",'aqua', -# 'deeppink','silver','khaki','goldenrod','y','y','y','y','y','y','y','y','y','y','y','y' ] + ['y']*50 -default_colors_ = ["blue","orange","green","magenta", "brown", "red",'black',"grey",u'#ff7f0e', - "cyan", "pink",'purple', u'#1f77b4', - "darkorchid","sienna","lightpink", "indigo","mediumseagreen",'aqua', - 'deeppink','silver','khaki','goldenrod'] * 100 - - -def get_eval_data(logdir, eval_metric): - eval_data = defaultdict(lambda :defaultdict(list)) - - for root, _, files in os.walk(logdir): - for file in files: - if 'testing_' in file: - assert ".pkl" in file - test_env_name = file.lstrip("testing_").rstrip(".pkl") - try: - with open(root+"/"+file, "rb") as f: - seed_eval_data = pickle.load(f) - except: - print("Pickle not loaded: ", root+"/"+file) - time.sleep(1) - continue - - eval_data[test_env_name]["values"].append(seed_eval_data[eval_metric]) - eval_data[test_env_name]["steps"].append(seed_eval_data["test_step_nb"]) - - # if 'log.csv' in files: - # run_name = root[8:] - # exp_name = None - # - # config = None - # exp_idx += 1 - # - # # load progress data - # try: - # print(os.path.join(root, 'log.csv')) - # exp_data = 
pd.read_csv(os.path.join(root, 'log.csv')) - # except: - # size = (Path(root) / 'log.csv').stat().st_size - # if size == 0: - # raise ValueError("CSV {} empty".format(os.path.join(root, 'log.csv'))) - # else: - # raise ValueError("CSV {} faulty".format(os.path.join(root, 'log.csv'))) - # - # exp_data = exp_data[::load_subsample_step] - # data_dict = exp_data.to_dict("list") - # - # data_dict['config'] = config - # nb_epochs = len(data_dict['frames']) - # print('{} -> {}'.format(run_name, nb_epochs)) - - for test_env, seed_data in eval_data.items(): - min_len_seed = min([len(s) for s in seed_data['steps']]) - eval_data[test_env]["values"] = np.array([s[:min_len_seed] for s in eval_data[test_env]["values"]]) - eval_data[test_env]["steps"] = np.array([s[:min_len_seed] for s in eval_data[test_env]["steps"]]) - - return eval_data - -def get_all_runs(logdir, load_subsample_step=1): - """ - Recursively look through logdir for output files produced by - Assumes that any file "log.csv" is a valid hit. - """ - global exp_idx - global units - datasets = [] - for root, _, files in os.walk(logdir): - if 'log.csv' in files: - if (Path(root) / 'log.csv').stat().st_size == 0: - print("CSV {} empty".format(os.path.join(root, 'log.csv'))) - continue - - run_name = root[8:] - - exp_name = None - - config = None - exp_idx += 1 - - # load progress data - try: - exp_data = pd.read_csv(os.path.join(root, 'log.csv')) - print("Loaded:", os.path.join(root, 'log.csv')) - except: - raise ValueError("CSV {} faulty".format(os.path.join(root, 'log.csv'))) - - exp_data = exp_data[::load_subsample_step] - data_dict = exp_data.to_dict("list") - - data_dict['config'] = config - nb_epochs = len(data_dict['frames']) - if nb_epochs == 1: - print(f'{run_name} -> {colored(f"nb_epochs {nb_epochs}", "red")}') - else: - print('{} -> nb_epochs {}'.format(run_name, nb_epochs)) - - datasets.append(data_dict) - - return datasets - - -def get_datasets(rootdir, load_only="", load_subsample_step=1, ignore_patterns=("ignore"), require_patterns=()): - _, models_list, _ = next(os.walk(rootdir)) - for dir_name in models_list.copy(): - # add "ignore" in a directory name to avoid loading its content - for ignore_pattern in ignore_patterns: - if ignore_pattern in dir_name or load_only not in dir_name: - if dir_name in models_list: - models_list.remove(dir_name) - - if len(require_patterns) > 0: - if not any([require_pattern in dir_name for require_pattern in require_patterns]): - if dir_name in models_list: - models_list.remove(dir_name) - - for expe_name in list(labels.keys()): - if expe_name not in models_list: - del labels[expe_name] - - - # setting per-model type colors - for i, m_name in enumerate(models_list): - for m_type, m_color in per_model_colors.items(): - if m_type in m_name: - colors[m_name] = m_color - print("extracting data for {}...".format(m_name)) - m_id = m_name - models_saves[m_id] = OrderedDict() - models_saves[m_id]['data'] = get_all_runs(rootdir+m_name, load_subsample_step=load_subsample_step) - print("done") - - if m_name not in labels: - labels[m_name] = m_name - - model_eval_data[m_id] = get_eval_data(logdir=rootdir+m_name, eval_metric=eval_metric) - - """ - retrieve all experiences located in "data to vizu" folder - """ -labels = OrderedDict() -per_model_colors = OrderedDict() -# per_model_colors = OrderedDict([('ALP-GMM',u'#1f77b4'), -# ('hmn','pink'), -# ('ADR','black')]) - -# LOAD DATA -models_saves = OrderedDict() -colors = OrderedDict() -model_eval_data = OrderedDict() - -static_lines = {} -# 
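Since the smoothing window shapes most of the curves plotted further down, here is a standalone sketch of the smooth helper defined earlier in this script, applied to a toy array; the toy values are assumptions.

import numpy as np

def smooth(x_, n=50):
    # Same trailing moving average as the helper above: mean of each point and up to n preceding points.
    if type(x_) == list:
        x_ = np.array(x_)
    return np.array([x_[max(i - n, 0):i + 1].mean() for i in range(len(x_))])

y = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]
print(smooth(y, n=2))  # e.g. the third value is the mean of the first three points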
get_datasets("storage/",load_only="RERUN_WizardGuide") -# get_datasets("storage/",load_only="RERUN_WizardTwoGuides") -try: - load_pattern = eval(sys.argv[1]) - -except: - load_pattern = sys.argv[1] - -ignore_patterns = ["_ignore_"] -require_patterns = [ - "_" -] - -# require_patterns = [ - # "dummy_cs_jz_scaf_A_E_N_A_E", - # "03-12_dummy_cs_jz_formats_AE", -# ] -# -# def label_parser(label, figure_id, label_parser_dict=None): -# if "single" in label: -# ty = "single" -# elif "group" in label: -# ty = "group" -# -# if "asoc" in label: -# return f"Asocial_pretrain({ty})" -# -# if "exp_soc" in label: -# return f"Role_B_pretrain({ty})" -# -# return label - - -# -# # DUMMY FORMATS -# require_patterns = [ -# "03-12_dummy_cs_formats_CBL", -# "dummy_cs_formats_CBL_N_rec_5" - # "03-12_dummy_cs_jz_formats_", - # "dummy_cs_jz_formats_N_rec_5" -# ] -# def label_parser(label, figure_id, label_parser_dict=None): -# if "CBL" in label: -# eb = "CBL" -# else: -# eb = "no_bonus" -# -# if "AE" in label: -# label = f"AE_PPO_{eb}" -# elif "E" in label: -# label = f"E_PPO_{eb}" -# elif "A" in label: -# label = f"A_PPO_{eb}" -# elif "N" in label: -# label = f"N_PPO_{eb}" -# -# return label -# - -# DUMMY CLASSIC -# require_patterns = [ - # "07-12_dummy_cs_NEW2_Pointing_sm_CB_very_small", - # "dummy_cs_JA_Pointing_CB_sm", - - # "06-12_dummy_cs_NEW_Color_CBL", - # "dummy_cs_JA_Color_CBL_new" - - # "07-12_dummy_cs_NEW2_Feedback_CBL", - # "dummy_cs_JA_Feedback_CBL_new" - - # "08-12_dummy_cs_emulation_no_distr_rec_5_CB_exploration-bonus-type_cell_exploration-bonus-params__1_50", - # "08-12_dummy_cs_emulation_no_distr_rec_5_CB", - - # "dummy_cs_RR_ft_NEW_single_CB_marble_pass_B_exp_soc", - # "dummy_cs_RR_ft_NEW_single_CB_marble_pass_B_contr_asoc", - - # "dummy_cs_RR_ft_NEW_group_CB_marble_pass_A_exp_soc", - # "dummy_cs_RR_ft_NEW_group_CB_marble_pass_A_contr_asoc" - - # "03-12_dummy_cs_jz_formats_A", - # "03-12_dummy_cs_jz_formats_E", - # "03-12_dummy_cs_jz_formats_AE", - # "dummy_cs_jz_formats_N_rec_5" - - # "03-12_dummy_cs_formats_CBL_A", - # "03-12_dummy_cs_formats_CBL_E", - # "03-12_dummy_cs_formats_CBL_AE", - # "dummy_cs_formats_CBL_N_rec_5" - - # "03-12_dummy_cs_jz_formats_AE", - # "dummy_cs_jz_scaf_A_E_N_A_E_full-AEfull", - # "dummy_cs_jz_scaf_A_E_N_A_E_scaf_full-AEfull", -# ] - -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.replace("07-12_dummy_cs_NEW2_Pointing_sm_CB_very_small", "PPO_CB") -# label = label.replace("dummy_cs_JA_Pointing_CB_sm", "JA_PPO_CB") -# -# label = label.replace("06-12_dummy_cs_NEW_Color_CBL", "PPO_CBL") -# label = label.replace("dummy_cs_JA_Color_CBL_new", "JA_PPO_CBL") -# -# label = label.replace("07-12_dummy_cs_NEW2_Feedback_CBL", "PPO_CBL") -# label = label.replace("dummy_cs_JA_Feedback_CBL_new", "JA_PPO_CBL") -# -# label = label.replace( -# "08-12_dummy_cs_emulation_no_distr_rec_5_CB_exploration-bonus-type_cell_exploration-bonus-params__1_50", -# "PPO_CB_1") -# label = label.replace( -# "08-12_dummy_cs_emulation_no_distr_rec_5_CB_exploration-bonus-type_cell_exploration-bonus-params__1_50", -# "PPO_CB_1") -# -# label = label.replace("dummy_cs_RR_ft_NEW_single_CB_marble_pass_B_exp_soc", "PPO_CB_role_B_single") -# label = label.replace("dummy_cs_RR_ft_NEW_single_CB_marble_pass_B_contr_asoc", "PPO_CB_asoc_single") -# -# label = label.replace("dummy_cs_RR_ft_NEW_group_CB_marble_pass_A_exp_soc", "PPO_CB_role_B_group") -# label = label.replace("dummy_cs_RR_ft_NEW_group_CB_marble_pass_A_contr_asoc", "PPO_CB_asoc_group") -# -# label = label.replace( -# 
"03-12_dummy_cs_formats_CBL_A_rec_5_env_SocialAI-ALangFeedbackTrainFormatsCSParamEnv-v1_recurrence_5_test-set-name_AFormatsTestSet_exploration-bonus-type_lang", -# "PPO_CBL_Ask") -# label = label.replace( -# "03-12_dummy_cs_formats_CBL_E_rec_5_env_SocialAI-ELangFeedbackTrainFormatsCSParamEnv-v1_recurrence_5_test-set-name_EFormatsTestSet_exploration-bonus-type_lang", -# "PPO_CBL_Eye_contact") -# label = label.replace( -# "03-12_dummy_cs_formats_CBL_AE_rec_5_env_SocialAI-AELangFeedbackTrainFormatsCSParamEnv-v1_recurrence_5_test-set-name_AEFormatsTestSet_exploration-bonus-type_lang", -# "PPO_CBL_Ask_Eye_contact") -# label = label.replace("dummy_cs_formats_CBL_N_rec_5", "PPO_CBL_No") -# -# label = label.replace( -# "03-12_dummy_cs_jz_formats_E_rec_5_env_SocialAI-ELangFeedbackTrainFormatsCSParamEnv-v1_recurrence_5_test-set-name_EFormatsTestSet", -# "PPO_no_bonus_Eye_contact") -# label = label.replace( -# "03-12_dummy_cs_jz_formats_A_rec_5_env_SocialAI-ALangFeedbackTrainFormatsCSParamEnv-v1_recurrence_5_test-set-name_AFormatsTestSet", -# "PPO_no_bonus_Ask") -# label = label.replace( -# "03-12_dummy_cs_jz_formats_AE_rec_5_env_SocialAI-AELangFeedbackTrainFormatsCSParamEnv-v1_recurrence_5_test-set-name_AEFormatsTestSet", -# "PPO_no_bonus_Ask_Eye_contact") -# label = label.replace("dummy_cs_jz_formats_N_rec_5", "PPO_no_bonus_No") -# -# label = label.replace("03-12_dummy_cs_jz_formats_AE", "PPO_no_bonus_no_scaf") -# label = label.replace("dummy_cs_jz_scaf_A_E_N_A_E_full-AEfull", "PPO_no_bonus_scaf_4") -# label = label.replace("dummy_cs_jz_scaf_A_E_N_A_E_scaf_full-AEfull", "PPO_no_bonus_scaf_8") -# -# return label - - -# Final case studies -require_patterns = [ - "_", - # pointing - # "04-01_Pointing_CB_heldout_doors", - - # # role reversal - # "03-01_RR_ft_single_CB_marble_pass_A_asoc_contr", - # "03-01_RR_ft_single_CB_marble_pass_A_soc_exp", - - # "05-01_RR_ft_group_50M_CB_marble_pass_A_asoc_contr", - # "05-01_RR_ft_group_50M_CB_marble_pass_A_soc_exp", - - # scaffolding - # "05-01_scaffolding_50M_no", - # "05-01_scaffolding_50M_acl_4_acl-type_intro_seq", - # "05-01_scaffolding_50M_acl_8_acl-type_intro_seq_scaf", -] - -def label_parser(label, figure_id, label_parser_dict=None): - label = label.replace("04-01_Pointing_CB_heldout_doors", "PPO_CB") - - label = label.replace("05-01_scaffolding_50M_no_acl", "PPO_no_scaf") - label = label.replace("05-01_scaffolding_50M_acl_4_acl-type_intro_seq", "PPO_scaf_4") - label = label.replace("05-01_scaffolding_50M_acl_8_acl-type_intro_seq_scaf", "PPO_scaf_8") - - label = label.replace("03-01_RR_ft_single_CB_marble_pass_A_soc_exp", "PPO_CB_role_B") - label = label.replace("03-01_RR_ft_single_CB_marble_pass_A_asoc_contr", "PPO_CB_asocial") - - label = label.replace("05-01_RR_ft_group_50M_CB_marble_pass_A_soc_exp", "PPO_CB_role_B") - label = label.replace("05-01_RR_ft_group_50M_CB_marble_pass_A_asoc_contr", "PPO_CB_asocial") - - return label - - -color_dict = { - - # JA - # "JA_PPO_CBL": "blue", - # "PPO_CBL": "orange", - - # RR group - # "PPO_CB_role_B_group": "orange", - # "PPO_CB_asoc_group": "blue" - - # formats No - # "PPO_no_bonus_No": "blue", - # "PPO_no_bonus_Eye_contact": "magenta", - # "PPO_no_bonus_Ask": "orange", - # "PPO_no_bonus_Ask_Eye_contact": "green" - - # formats CBL - # "PPO_CBL_No": "blue", - # "PPO_CBL_Eye_contact": "magenta", - # "PPO_CBL_Ask": "orange", - # "PPO_CBL_Ask_Eye_contact": "green" -} - -# # POINTING_GENERALIZATION (DUMMY) -# require_patterns = [ -# "29-10_SAI_Pointing_CS_PPO_CB_", -# "29-10_SAI_LangColor_CS_PPO_CB_" -# ] -# -# 
color_dict = { -# "dummy_cs_JA_Feedback_CBL_new": "blue", -# "dummy_cs_Feedback_CBL": "orange", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("Pointing_CS_PPO_CB", "PPO_CB_train(DUMMY)") -# label=label.replace("LangColor_CS_PPO_CB", "PPO_CB_test(DUMMY)") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Pointing_gen_eval.png" - -# # FEEDBACK GENERALIZATION (DUMMY) -# require_patterns = [ -# "29-10_SAI_LangFeedback_CS_PPO_CBL_", -# "29-10_SAI_LangColor_CS_PPO_CB_" -# ] -# -# color_dict = { -# "PPO_CBL_train(DUMMY)": "blue", -# "PPO_CBL_test(DUMMY)": "maroon", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("LangFeedback_CS_PPO_CBL", "PPO_CBL_train(DUMMY)") -# label=label.replace("LangColor_CS_PPO_CB", "PPO_CBL_test(DUMMY)") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Feedback_gen_eval.png" - -# # COLOR GENERALIZATION (DUMMY) -# require_patterns = [ -# "29-10_SAI_LangColor_CS_PPO_CBL_", -# "29-10_SAI_LangColor_CS_PPO_CB_" -# ] -# -# color_dict = { -# "PPO_CBL_train(DUMMY)": "blue", -# "PPO_CBL_test(DUMMY)": "maroon", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("LangColor_CS_PPO_CBL", "PPO_CBL_train(DUMMY)") -# label=label.replace("LangColor_CS_PPO_CB", "PPO_CBL_test(DUMMY)") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Color_gen_eval.png" - -# # POINTING - PILOT -# require_patterns = [ -# "29-10_SAI_Pointing_CS_PPO_", -# ] -# -# color_dict = { -# "PPO_RIDE": "orange", -# "PPO_RND": "magenta", -# "PPO_no": "maroon", -# "PPO_CBL": "green", -# "PPO_CB": "blue", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("Pointing_CS_", "") -# return label -# # -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Pointing_eval.png" - - -# LANGCOLOR - 7 Colors - PILOT -# require_patterns = [ -# "29-10_SAI_LangColor_CS_PPO_", -# ] -# -# color_dict = { -# "PPO_RIDE": "orange", -# "PPO_RND": "magenta", -# "PPO_no": "maroon", -# "PPO_CBL": "green", -# "PPO_CB": "blue", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("LangColor_CS_", "") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Color_eval.png" - -# # LangColor - CBL - 3 5 7 -# require_patterns = [ -# "02-11_SAI_LangColor_CS_5C_PPO_CBL", -# "02-11_SAI_LangColor_CS_3C_PPO_CBL", -# "29-10_SAI_LangColor_CS_PPO_CBL" -# ] - -# RND RIDE reference : RIDE > RND > no -# require_patterns = [ -# "24-08_new_ref", -# ] - - -# # # LANG FEEDBACK -# require_patterns = [ -# "24-10_SAI_LangFeedback_CS_PPO_", -# "29-10_SAI_LangFeedback_CS_PPO_", -# ] -# color_dict = { -# "PPO_RIDE": "orange", -# "PPO_RND": "magenta", -# "PPO_no": "maroon", -# "PPO_CBL": "green", -# "PPO_CB": "blue", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# 
label=label.replace("LangFeedback_CS_", "") -# return label -# -# # eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Feedback_eval.png" -# - -# # ROLE REVERSAL - group (DUMMY) -# require_patterns = [ -# "24-10_SAI_LangFeedback_CS_PPO_CB_", -# "29-10_SAI_LangFeedback_CS_PPO_CBL_", -# ] -# color_dict = { -# "PPO_CB_experimental": "green", -# "PPO_CB_control": "blue", -# } -# color_dict=None -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("LangFeedback_CS_", "") -# -# label=label.replace("PPO_CB", "PPO_CB_control") -# label=label.replace("controlL", "experimental") -# -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/RR_dummy_group.png" - -# # ROLE REVERSAL - single (DUMMY) -# require_patterns = [ -# "24-10_SAI_LangFeedback_CS_PPO_CB_", -# "24-10_SAI_LangFeedback_CS_PPO_no_", -# ] -# color_dict = { -# "PPO_CB_experimental": "green", -# "PPO_CB_control": "blue", -# } -# color_dict=None -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("LangFeedback_CS_", "") -# -# label=label.replace("PPO_CB", "PPO_CB_control") -# label=label.replace("PPO_no", "PPO_CB_experimental") -# -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/RR_dummy_single.png" - -# # IMITATION train (DUMMY) -# require_patterns = [ -# "29-10_SAI_LangFeedback_CS_PPO_CBL_", -# "29-10_SAI_Pointing_CS_PPO_RIDE", -# ] -# -# color_dict = { -# "PPO_CB_no_distr(DUMMY)": "magenta", -# "PPO_CB_distr(DUMMY)": "orange", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("LangFeedback_CS_PPO_CBL", "PPO_CB_no_distr(DUMMY)") -# label=label.replace("Pointing_CS_PPO_RIDE", "PPO_CB_distr(DUMMY)") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Imitation_train.png" - -# # IMITATION test (DUMMY) -# require_patterns = [ -# "29-10_SAI_LangFeedback_CS_PPO_CBL_", -# "29-10_SAI_Pointing_CS_PPO_RIDE", -# ] -# -# color_dict = { -# "PPO_CB_no_distr(DUMMY)": "magenta", -# "PPO_CB_distr(DUMMY)": "orange", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("LangFeedback_CS_PPO_CBL", "PPO_CB_no_distr(DUMMY)") -# label=label.replace("Pointing_CS_PPO_RIDE", "PPO_CB_distr(DUMMY)") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Imitation_test.png" - - -# JA_POINTING -# require_patterns = [ -# "29-10_SAI_Pointing_CS_PPO_CB_", -# "04-11_SAI_JA_Pointing_CS_PPO_CB_less", # less reward -# ] -# color_dict = { -# "JA_Pointing_PPO_CB": "orange", -# "Pointing_PPO_CB": "blue", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("_CS_", "_") -# label=label.replace("_less_", "") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/JA_Pointing_eval.png" - - -# # JA_COLORS (JA, no) x (3,5,7) -# max_x_lim = 17 -# require_patterns = [ -# # "02-11_SAI_JA_LangColor", # max_x_lim = 17 -# 
"02-11_SAI_JA_LangColor_CS_3C", # max_x_lim = 17 -# # "02-11_SAI_LangColor_CS_5C_PPO_CBL", # max_x_lim = 17 -# "02-11_SAI_LangColor_CS_3C_PPO_CBL", -# # "29-10_SAI_LangColor_CS_PPO_CBL" -# ] -# color_dict = { -# "JA_LangColor_PPO_CBL": "orange", -# "LangColor_PPO_CBL": "blue", -# } - -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("_CS_", "_") -# label=label.replace("_3C_", "_") -# return label - -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/JA_Color_eval.png" - - -# JA_FEEDBACK -> max_xlim=17 -# max_x_lim = 17 -# require_patterns = [ -# "02-11_SAI_JA_LangFeedback_CS_PPO_CBL_", -# "29-10_SAI_LangFeedback_CS_PPO_CBL_", -# "dummy_cs_F", -# "dummy_cs_JA_F" -# ] -# color_dict = { -# "JA_LangFeedback_PPO_CBL": "orange", -# "LangFeedback_PPO_CBL": "blue", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("_CS_", "_") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/JA_Feedback_eval.png" - -# # Formats CBL -# require_patterns = [ -# "03-11_SAI_LangFeedback_CS_F_NO_PPO_CBL_env_SocialAI", -# "29-10_SAI_LangFeedback_CS_PPO_CBL_env_SocialAI", -# "03-11_SAI_LangFeedback_CS_F_ASK_PPO_CBL_env_SocialAI", -# "03-11_SAI_LangFeedback_CS_F_ASK_EYE_PPO_CBL_env_SocialAI", -# ] -# color_dict = { -# "LangFeedback_Eye_PPO_CBL": "blue", -# "LangFeedback_Ask_PPO_CBL": "orange", -# "LangFeedback_NO_PPO_CBL": "green", -# "LangFeedback_AskEye_PPO_CBL": "magenta", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("_CS_", "_") -# label=label.replace("_F_", "_") -# -# label=label.replace("LangFeedback_PPO", "LangFeedback_EYE_PPO") -# -# label=label.replace("EYE", "Eye") -# label=label.replace("No", "No") -# label=label.replace("ASK", "Ask") -# label=label.replace("Ask_Eye", "AskEye") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Formats_CBL_eval.png" - -# # Formats NO -# require_patterns = [ -# "24-10_SAI_LangFeedback_CS_PPO_no", # EYE -# "04-11_SAI_LangFeedback_CS_F_NO_PPO_NO_env_SocialAI", -# "04-11_SAI_LangFeedback_CS_F_ASK_PPO_NO_env_SocialAI", -# "04-11_SAI_LangFeedback_CS_F_ASK_EYE_PPO_NO_env_SocialAI", -# ] -# -# color_dict = { -# "LangFeedback_Eye_PPO_no": "blue", -# "LangFeedback_Ask_PPO_no": "orange", -# "LangFeedback_NO_PPO_no": "green", -# "LangFeedback_AskEye_PPO_no": "magenta", -# } -# -# def label_parser(label, figure_id, label_parser_dict=None): -# label = label.split("_env_")[0].split("SAI_")[1] -# label=label.replace("_CS_", "_") -# label=label.replace("_F_", "_") -# # -# label=label.replace("LangFeedback_PPO", "LangFeedback_EYE_PPO") -# label=label.replace("PPO_NO", "PPO_no") -# -# label=label.replace("EYE", "Eye") -# label=label.replace("No", "No") -# label=label.replace("ASK", "Ask") -# label=label.replace("Ask_Eye", "AskEye") -# return label -# -# eval_filename = f"/home/flowers/Documents/projects/embodied_acting_and_speaking/case_studies_figures/Formats_no_eval.png" - - -# -# require_patterns = [ -# "11-07_bAI_cb_GS_param_tanh_env_SocialAI-SocialAIParamEnv-v1_exploration-bonus-type_cell_exploration-bonus-params__2_50_exploration-bonus-tanh_0.6", -# # 
"04-11_SAI_ImitationDistr_CS_PPO_CB_small_env_SocialAI-EEmulationDistrInformationSeekingParamEnv-v1_recurrence_10", -# # "04-11_SAI_ImitationDistr_CS_PPO_CB_small_env_SocialAI-EEmulationDistrInformationSeekingParamEnv-v1_recurrence_10", -# "03-11_SAI_ImitationDistr_CS_PPO_CB_env_SocialAI-EEmulationDistrInformationSeekingParamEnv-v1_recurrence_10", -# # "04-11_SAI_ImitationNoDistr_CS_PPO_CB_small_env_SocialAI-EEmulationNoDistrInformationSeekingParamEnv-v1_recurrence_10", -# ] - -# require_patterns = [ -# "02-11_SAI_LangColor_CS_3C_PPO_CBL", -# "02-11_SAI_JA_LangColor_CS_3C_PPO_CBL", -# ] # at least one of those - - -# all of those -include_patterns = [ - "_" -] -#include_patterns = ["rec_5"] - -if eval_filename: - # saving - fontsize = 40 - legend_fontsize = 30 - linewidth = 10 -else: - fontsize = 5 - legend_fontsize = 5 - linewidth = 1 - -fontsize = 5 -legend_fontsize = 5 -linewidth = 1 - -title_fontsize = int(fontsize*1.2) - - -storage_dir = "storage/" -if load_pattern.startswith(storage_dir): - load_pattern = load_pattern[len(storage_dir):] - -if load_pattern.startswith("./storage/"): - load_pattern = load_pattern[len("./storage/"):] - -get_datasets(storage_dir, str(load_pattern), load_subsample_step=load_subsample_step, ignore_patterns=ignore_patterns, require_patterns=require_patterns) - -label_parser_dict = { - # "PPO_CB": "PPO_CB", - # "02-06_AppleStealing_experiments_cb_bonus_angle_occ_env_SocialAI-OthersPerceptionInferenceParamEnv-v1_exploration-bonus-type_cell": "NPC_visible", -} - -env_type = str(load_pattern) - -fig_type = "test" -try: - top_n = int(sys.argv[2]) -except: - top_n = 8 - -to_remove = [] - -for tr_ in to_remove: - if tr_ in models_saves: - del models_saves[tr_] - -print("Loaded:") -print("\n".join(list(models_saves.keys()))) - -#### get_datasets("storage/", "RERUN_WizardGuide_lang64_nameless") -#### get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_nameless") - - -if per_model_colors: # order runs for legend order as in per_models_colors, with corresponding colors - ordered_labels = OrderedDict() - for teacher_type in per_model_colors.keys(): - for k,v in labels.items(): - if teacher_type in k: - ordered_labels[k] = v - labels = ordered_labels -else: - print('not using per_model_color') - for k in models_saves.keys(): - labels[k] = k - -def plot_with_shade_seed(subplot_nb, ax, x, y, err, color, shade_color, label, - y_min=None, y_max=None, legend=False, leg_size=30, leg_loc='best', title=None, - ylim=[0,100], xlim=[0,40], leg_args={}, leg_linewidth=13.0, linewidth=10.0, labelsize=20, - filename=None, - zorder=None, xlabel='perf', ylabel='Env steps'): - - plt.rcParams.update({'font.size': 15}) - - plt.rcParams['axes.xmargin'] = 0 - plt.rcParams['axes.ymargin'] = 0 - - ax.locator_params(axis='x', nbins=3) - ax.locator_params(axis='y', nbins=3) - ax.tick_params(axis='both', which='major', labelsize=labelsize) - - x = x[:len(y)] - - # ax.scatter(x, y, color=color, linewidth=linewidth, zorder=zorder) - ax.plot(x, y, color=color, label=label, linewidth=linewidth, zorder=zorder) - - if err is not None: - ax.fill_between(x, y-err, y+err, color=shade_color, alpha=0.2) - - if legend: - leg = ax.legend(loc=leg_loc, **leg_args) #34 - for legobj in leg.legendHandles: - legobj.set_linewidth(leg_linewidth) - ax.set_xlabel(xlabel, fontsize=fontsize) - if subplot_nb == 0: - ax.set_ylabel(ylabel, fontsize=fontsize, labelpad=4) - - ax.set_xlim(xmin=xlim[0],xmax=xlim[1]) - ax.set_ylim(bottom=ylim[0],top=ylim[1]) - if title: - ax.set_title(title, fontsize=fontsize) - - # if 
filename is not None: - # f.savefig(filename) - - -# Plot utils -def plot_with_shade_grg(subplot_nb, ax, x, y, err, color, shade_color, label, - legend=False, leg_loc='best', title=None, - ylim=[0, 100], xlim=[0, 40], leg_args={}, leg_linewidth=13.0, linewidth=10.0, labelsize=20, fontsize=20, title_fontsize=30, - zorder=None, xlabel='Perf', ylabel='Env steps', linestyle="-", xnbins=3, ynbins=3, filename=None): - - #plt.rcParams.update({'font.size': 15}) - ax.locator_params(axis='x', nbins=xnbins) - ax.locator_params(axis='y', nbins=ynbins) - - ax.tick_params(axis='y', which='both', labelsize=labelsize) - ax.tick_params(axis='x', which='both', labelsize=labelsize*0.8) - # ax.tick_params(axis='both', which='both', labelsize="small") - - # ax.scatter(x, y, color=color,linewidth=linewidth,zorder=zorder, linestyle=linestyle) - ax.plot(x, y, color=color, label=label, linewidth=linewidth, zorder=zorder, linestyle=linestyle) - - ax.fill_between(x, y-err, y+err, color=shade_color, alpha=0.2) - - if legend: - leg = ax.legend(loc=leg_loc, **leg_args) # 34 - for legobj in leg.legendHandles: - legobj.set_linewidth(leg_linewidth) - - ax.set_xlabel(xlabel, fontsize=fontsize) - if subplot_nb == 0: - ax.set_ylabel(ylabel, fontsize=fontsize, labelpad=2) - - ax.set_xlim(xmin=xlim[0], xmax=xlim[1]) - ax.set_ylim(bottom=ylim[0], top=ylim[1]) - if title: - ax.set_title(title, fontsize=title_fontsize) - - # if filename is not None: - # f.savefig(filename) - - -# Metric plot -# metric = 'success_rate_mean' -# metric = 'mission_string_observed_mean' -# metric = 'extrinsic_return_mean' -# metric = 'extrinsic_return_max' -# metric = "rreturn_mean" -# metric = 'rreturn_max' -# metric = 'FPS' -# metric = 'duration' -# metric = 'intrinsic_reward_perf2_' -# metric = 'NPC_intro' - - -metrics = [ - 'success_rate_mean', - # 'FPS', - # 'extrinsic_return_mean', - # 'exploration_bonus_mean', - 'NPC_intro', - # 'curriculum_param_mean', - # 'curriculum_max_success_rate_mean', - # 'rreturn_mean' -] - -# f, ax = plt.subplots(1, len(metrics), figsize=(15.0, 9.0)) -f, ax = plt.subplots(1, len(metrics), figsize=(9.0, 9.0)) -# f, ax = plt.subplots(1, len(metrics), figsize=(20.0, 20.0)) -# f, ax = plt.subplots(1, 1, figsize=(5.0, 3.0)) - -if len(metrics) == 1: - ax = [ax] - -max_y = -np.inf -min_y = np.inf -# hardcoded -min_y, max_y = 0.0, 1.0 -max_steps = 0 -exclude_patterns = [] - - -# def label_parser(label, figure_id, label_parser_dict=None): -# -# label = label.split("_env_")[0].split("SAI_")[1] -# -# # # Pointing -# # label=label.replace("Pointing_CS_", "") -# -# # Feedback -# label=label.replace("LangFeedback_CS_", "") -# -# -# # label=label.replace("CS_PPO", "7COL_PPO") -# # label=label.replace("CS_3C_PPO", "3COL_PPO") -# # label=label.replace("CS_5C_PPO", "5COL_PPO") -# -# # label=label.replace("CS_PPO", "Eye_contact_PPO") -# # label=label.replace("CS_F_ASK_PPO", "Ask_PPO") -# # label=label.replace("CS_F_NO_PPO", "NO_PPO") -# # label=label.replace("CS_F_ASK_EYE_PPO", "Ask_Eye_contact_PPO") -# # -# # label=label.replace("PPO_no", "PPO_no_bonus") -# # label=label.replace("PPO_NO", "PPO_no_bonus") -# -# if label_parser_dict: -# if sum([1 for k, v in label_parser_dict.items() if k in label]) != 1: -# if label in label_parser_dict: -# # see if there is an exact match -# return label_parser_dict[label] -# else: -# print("ERROR multiple curves match a lable and there is no exact match for {}".format(label)) -# exit() -# -# for k, v in label_parser_dict.items(): -# if k in label: return v -# -# else: -# # return 
label.split("_env_")[1] -# if figure_id not in [1, 2, 3, 4]: -# return label -# else: -# # default -# pass -# -# return label - - -for metric_i, metric in enumerate(metrics): - min_y, max_y = 0.0, 1.0 - default_colors = default_colors_.copy() - for model_i, m_id in enumerate(models_saves.keys()): - - #excluding some experiments - if any([ex_pat in m_id for ex_pat in exclude_patterns]): - continue - if len(include_patterns) > 0: - if not any([in_pat in m_id for in_pat in include_patterns]): - continue - runs_data = models_saves[m_id]['data'] - ys = [] - - if runs_data[0]['frames'][1] == 'frames': - runs_data[0]['frames'] = list(filter(('frames').__ne__, runs_data[0]['frames'])) - ########################################### - - if per_seed: - min_len = None - - else: - # determine minimal run length across seeds - lens = [len(run['frames']) for run in runs_data if len(run['frames'])] - minimum = sorted(lens)[-min(top_n, len(lens))] - min_len = np.min([len(run['frames']) for run in runs_data if len(run['frames']) >= minimum]) - - # keep only top k - runs_data = [run for run in runs_data if len(run['frames']) >= minimum] - - # min_len = np.min([len(run['frames']) for run in runs_data if len(run['frames']) > 10]) - - # compute env steps (x axis) - longest_id = np.argmax([len(rd['frames']) for rd in runs_data]) - steps = np.array(runs_data[longest_id]['frames'], dtype=np.int) / steps_denom - steps = steps[:min_len] - - - for run in runs_data: - if metric not in run: - # succes_rate_mean <==> bin_extrinsic_return_mean - if metric == 'success_rate_mean': - metric_ = "bin_extrinsic_return_mean" - if metric_ not in run: - raise ValueError("Neither {} or {} is present: {} Possible metrics: {}. ".format(metric, metric_, list(run.keys()))) - - data = run[metric_] - - else: - raise ValueError("Unknown metric: {} Possible metrics: {}. ".format(metric, list(run.keys()))) - else: - data = run[metric] - - if data[1] == metric: - data = np.array(list(filter((metric).__ne__, data)), dtype=np.float16) - ########################################### - if per_seed: - ys.append(data) - else: - if len(data) >= min_len: - if len(data) > min_len: - print("run has too many {} datapoints ({}). 
Discarding {}".format(m_id, len(data), - len(data)-min_len)) - data = data[0:min_len] - ys.append(data) - else: - raise ValueError("How can data be < min_len if it was capped above") - - ys_same_len = ys - - # computes stats - n_seeds = len(ys_same_len) - - if per_seed: - sems = np.array(ys_same_len) - stds = np.array(ys_same_len) - means = np.array(ys_same_len) - color = default_colors[model_i] - - else: - sems = np.std(ys_same_len, axis=0)/np.sqrt(len(ys_same_len)) # sem - stds = np.std(ys_same_len, axis=0) # std - means = np.mean(ys_same_len, axis=0) - color = default_colors[model_i] - - # per-metric adjustments - ylabel = metric - - ylabel = { - "success_rate_mean" : "Success rate", - "exploration_bonus_mean": "Exploration bonus", - "NPC_intro": "Successful introduction (%)", - }.get(ylabel, ylabel) - - - if metric == 'duration': - ylabel = "time (hours)" - means = means / 3600 - sems = sems / 3600 - stds = stds / 3600 - - if per_seed: - #plot x y bounds - curr_max_y = np.max(np.max(means)) - curr_min_y = np.min(np.min(means)) - curr_max_steps = np.max(np.max(steps)) - - else: - # plot x y bounds - curr_max_y = np.max(means+stds) - curr_min_y = np.min(means-stds) - curr_max_steps = np.max(steps) - - if curr_max_y > max_y: - max_y = curr_max_y - if curr_min_y < min_y: - min_y = curr_min_y - - if curr_max_steps > max_steps: - max_steps = curr_max_steps - - if subsample_step: - steps = steps[0::subsample_step] - means = means[0::subsample_step] - stds = stds[0::subsample_step] - sems = sems[0::subsample_step] - ys_same_len = [y[0::subsample_step] for y in ys_same_len] - - # display seeds separtely - if per_seed: - for s_i, seed_ys in enumerate(ys_same_len): - seed_c = default_colors[model_i+s_i] - # label = m_id#+"(s:{})".format(s_i) - label = str(s_i) - seed_ys = smooth(seed_ys, smooth_factor) - plot_with_shade_seed(0, ax[metric_i], steps, seed_ys, None, seed_c, seed_c, label, - legend=draw_legend, xlim=[0, max_steps], ylim=[min_y, max_y], - leg_size=leg_size, xlabel=f"Env steps (1e6)", ylabel=ylabel, linewidth=linewidth, - labelsize=fontsize, - # fontsize=fontsize, - ) - - summary_dict[s_i] = seed_ys[-1] - summary_dict_colors[s_i] = seed_c - else: - label = label_parser(m_id, load_pattern, label_parser_dict=label_parser_dict) - - if color_dict: - color = color_dict[label] - else: - color = default_colors[model_i] - - label = label+"({})".format(n_seeds) - - - if smooth_factor: - means = smooth(means, smooth_factor) - stds = smooth(stds, smooth_factor) - - x_lim = max(steps[-1], x_lim) - x_lim = min(max_x_lim, x_lim) - - leg_args = { - 'fontsize': legend_fontsize - } - - plot_with_shade_grg( - 0, ax[metric_i], steps, means, stds, color, color, label, - legend=draw_legend and metric_i == 0, - xlim=[0, x_lim], - ylim=[0, max_y], - xlabel=f"Env steps (1e6)", - ylabel=ylabel, - title=None, - labelsize=fontsize*train_inc_font, - fontsize=fontsize*train_inc_font, - title_fontsize=title_fontsize, - linewidth=linewidth, - leg_linewidth=5, - leg_args=leg_args, - xnbins=xnbins, - ynbins=ynbins, - ) - summary_dict[label] = means[-1] - summary_dict_colors[label] = color - - if len(summary_dict) == 0: - raise ValueError(f"No experiments found for {load_pattern}.") - - # print summary - best = max(summary_dict.values()) - - pc = 0.3 - n = int(len(summary_dict)*pc) - print("top n: ", n) - - top_pc = sorted(summary_dict.values())[-n:] - bottom_pc = sorted(summary_dict.values())[:n] - - print("legend:") - cprint("\tbest", "green") - cprint("\ttop {} %".format(pc), "blue") - cprint("\tbottom {} 
%".format(pc), "red") - print("\tothers") - print() - - - for l, p in sorted(summary_dict.items(), key=lambda kv: kv[1]): - - c = summary_dict_colors[l] - if p == best: - cprint("label: {} ({})".format(l, c), "green") - cprint("\t {}:{}".format(metric, p), "green") - - elif p in top_pc: - cprint("label: {} ({})".format(l, c), "blue") - cprint("\t {}:{}".format(metric, p), "blue") - - elif p in bottom_pc: - cprint("label: {} ({})".format(l, c), "red") - cprint("\t {}:{}".format(metric, p), "red") - - else: - print("label: {} ({})".format(l, c)) - print("\t {}:{}".format(metric, p)) - - for label, (mean, std, color) in static_lines.items(): - plot_with_shade_grg( - 0, ax[metric_i], steps, np.array([mean]*len(steps)), np.array([std]*len(steps)), color, color, label, - legend=True, - xlim=[0, x_lim], - ylim=[0, 1.0], - xlabel=f"Env steps (1e6)", - ylabel=ylabel, - linestyle=":", - leg_args=leg_args, - fontsize=fontsize, - title_fontsize=title_fontsize, - xnbins=xnbins, - ynbins=ynbins, - ) - -# plt.tight_layout() -# f.savefig('graphics/{}_{}_results.svg'.format(str(figure_id, metric))) -# f.savefig('graphics/{}_{}_results.png'.format(str(figure_id, metric))) -cprint("Ignore pattern: {}".format(ignore_patterns), "blue") -if plot_train: - plt.tight_layout() - # plt.subplots_adjust(hspace=1.5, wspace=0.5, left=0.1, right=0.9, bottom=0.1, top=0.85) - plt.subplots_adjust(hspace=1.5, wspace=0.5, left=0.1, right=0.9, bottom=0.1, top=0.85) - plt.suptitle(super_title) - plt.show() -plt.close() - -curr_max_y = 0 -x_lim = 0 - -max_y = -np.inf -min_y = np.inf -# hardcoded -min_y, max_y = 0.0, 1.0 - -grid = True -draw_eval_legend = True - -if study_eval: - print("Evaluation") - # evaluation sets - number_of_eval_envs = max(list([len(v.keys()) for v in model_eval_data.values()])) - - if plot_aggregated_test: - number_of_eval_envs += 1 - - if number_of_eval_envs == 0: - print("No eval envs") - exit() - - if plot_only_aggregated_test: - f, ax = plt.subplots(1, 1, figsize=(9.0, 9.0)) - - else: - if grid: - # grid - subplot_y = math.ceil(math.sqrt(number_of_eval_envs)) - subplot_x = math.ceil(number_of_eval_envs / subplot_y) - # from IPython import embed; embed() - - while subplot_x % 1 != 0: - subplot_y -= 1 - subplot_x = number_of_eval_envs / subplot_y - - if subplot_x == 1: - subplot_y = math.ceil(math.sqrt(number_of_eval_envs)) - subplot_x = math.floor(math.sqrt(number_of_eval_envs)) - - subplot_y = int(subplot_y) - subplot_x = int(subplot_x) - - assert subplot_y * subplot_x >= number_of_eval_envs - - f, ax_ = plt.subplots(subplot_y, subplot_x, figsize=(6.0, 6.0), sharey=False) #, sharex=True, sharey=True) - - if subplot_y != 1: - ax = list(chain.from_iterable(ax_)) - else: - ax=ax_ - - else: - # flat - f, ax = plt.subplots(1, number_of_eval_envs, figsize=(15.0, 9.0)) #), sharey=True, sharex=True) - - if number_of_eval_envs == 1: - ax = [ax] - - default_colors = default_colors_.copy() - - test_summary_dict = defaultdict(dict) - test_summary_dict_colors = defaultdict(dict) - - for model_i, m_id in enumerate(model_eval_data.keys()): - # excluding some experiments - if any([ex_pat in m_id for ex_pat in exclude_patterns]): - continue - if len(include_patterns) > 0: - if not any([in_pat in m_id for in_pat in include_patterns]): - continue - - # computes stats - if sort_test: - test_envs_sorted = enumerate(sorted(model_eval_data[m_id].items(), key=lambda kv: sort_test_set(kv[0]))) - else: - test_envs_sorted = enumerate(model_eval_data[m_id].items()) - - if plot_aggregated_test: - agg_means = [] - - for env_i, 
(test_env, env_data) in test_envs_sorted: - ys_same_len = env_data["values"] - steps = env_data["steps"].mean(0) / steps_denom - n_seeds = len(ys_same_len) - - if per_seed: - sems = np.array(ys_same_len) - stds = np.array(ys_same_len) - means = np.array(ys_same_len) - color = default_colors[model_i] - - else: - sems = np.std(ys_same_len, axis=0) / np.sqrt(len(ys_same_len)) # sem - stds = np.std(ys_same_len, axis=0) # std - means = np.mean(ys_same_len, axis=0) - color = default_colors[model_i] - - # per-metric adjusments - - if per_seed: - # plot x y bounds - curr_max_y = np.max(np.max(means)) - curr_min_y = np.min(np.min(means)) - curr_max_steps = np.max(np.max(steps)) - - else: - # plot x y bounds - curr_max_y = np.max(means + stds) - curr_min_y = np.min(means - stds) - curr_max_steps = np.max(steps) - - if plot_aggregated_test: - agg_means.append(means) - - if curr_max_y > max_y: - max_y = curr_max_y - if curr_min_y < min_y: - min_y = curr_min_y - - x_lim = max(steps[-1], x_lim) - x_lim = min(max_x_lim, x_lim) - - eval_metric_name = { - "test_success_rates": "Success rate", - 'exploration_bonus_mean': "Exploration bonus", - - }.get(eval_metric, eval_metric) - - test_env_name = test_env.replace("Env", "").replace("Test", "") - - env_types = ["InformationSeeking", "Collaboration", "PerspectiveTaking"] - for env_type in env_types: - if env_type in test_env_name: - test_env_name = test_env_name.replace(env_type, "") - test_env_name += f"\n({env_type})" - - if grid: - ylabel = eval_metric_name - title = test_env_name - - else: - # flat - ylabel = test_env_name - title = eval_metric_name - - leg_args = { - 'fontsize': legend_fontsize // 1 - } - - if per_seed: - for s_i, seed_ys in enumerate(ys_same_len): - seed_c = default_colors[model_i + s_i] - # label = m_id#+"(s:{})".format(s_i) - label = str(s_i) - - if not plot_only_aggregated_test: - seed_ys = smooth(seed_ys, eval_smooth_factor) - plot_with_shade_seed(0, ax[env_i], steps, seed_ys, None, seed_c, seed_c, label, - legend=draw_eval_legend, xlim=[0, x_lim], ylim=[min_y, max_y], - leg_size=leg_size, xlabel=f"Steps (1e6)", ylabel=ylabel, linewidth=linewidth, title=title) - - test_summary_dict[s_i][test_env] = seed_ys[-1] - test_summary_dict_colors[s_i] = seed_c - else: - label = label_parser(m_id, load_pattern, label_parser_dict=label_parser_dict) - - if not plot_only_aggregated_test: - - if color_dict: - color = color_dict[label] - else: - color = default_colors[model_i] - - label = label + "({})".format(n_seeds) - - if smooth_factor: - means = smooth(means, eval_smooth_factor) - stds = smooth(stds, eval_smooth_factor) - - plot_with_shade_grg( - 0, ax[env_i], steps, means, stds, color, color, label, - legend=draw_eval_legend, - xlim=[0, x_lim+1], - ylim=[0, max_y], - xlabel=f"Env steps (1e6)" if env_i // (subplot_x) == subplot_y -1 else None, # only last line - ylabel=ylabel if env_i % subplot_x == 0 else None, # only first row - title=title, - title_fontsize=title_fontsize, - labelsize=fontsize, - fontsize=fontsize, - linewidth=linewidth, - leg_linewidth=5, - leg_args=leg_args, - xnbins=xnbins, - ynbins=ynbins, - ) - - test_summary_dict[label][test_env] = means[-1] - test_summary_dict_colors[label] = color - - if plot_aggregated_test: - if plot_only_aggregated_test: - agg_env_i = 0 - else: - agg_env_i = number_of_eval_envs - 1 # last one - - agg_means = np.array(agg_means) - agg_mean = agg_means.mean(axis=0) - agg_std = agg_means.std(axis=0) # std - - if smooth_factor and not per_seed: - agg_mean = smooth(agg_mean, eval_smooth_factor) - 
agg_std = smooth(agg_std, eval_smooth_factor) - - if color_dict: - color = color_dict[re.sub("\([0-9]\)", '', label)] - else: - color = default_colors[model_i] - - if per_seed: - print("Not smooth aggregated because of per seed") - for s_i, (seed_ys, seed_st) in enumerate(zip(agg_mean, agg_std)): - seed_c = default_colors[model_i + s_i] - # label = m_id#+"(s:{})".format(s_i) - label = str(s_i) - # seed_ys = smooth(seed_ys, eval_smooth_factor) - plot_with_shade_seed(0, - ax if plot_only_aggregated_test else ax[agg_env_i], - steps, seed_ys, seed_st, seed_c, seed_c, label, - legend=draw_eval_legend, xlim=[0, x_lim], ylim=[min_y, max_y], - labelsize=fontsize, - filename=eval_filename, - leg_size=leg_size, xlabel=f"Steps (1e6)", ylabel=ylabel, linewidth=1, title=agg_title) - else: - - # just used for creating a dummy Imitation test figure -> delete - # agg_mean = agg_mean * 0.1 - # agg_std = agg_std * 0.1 - # max_y = 1 - - plot_with_shade_grg( - 0, - ax if plot_only_aggregated_test else ax[agg_env_i], - steps, agg_mean, agg_std, color, color, label, - legend=draw_eval_legend, - xlim=[0, x_lim + 1], - ylim=[0, max_y], - xlabel=f"Steps (1e6)" if plot_only_aggregated_test or (agg_env_i // (subplot_x) == subplot_y - 1) else None, # only last line - ylabel=ylabel if plot_only_aggregated_test or (agg_env_i % subplot_x == 0) else None, # only first row - title_fontsize=title_fontsize, - title=agg_title, - labelsize=fontsize, - fontsize=fontsize, - linewidth=linewidth, - leg_linewidth=5, - leg_args=leg_args, - xnbins=xnbins, - ynbins=ynbins, - filename=eval_filename, - ) - - # print summary - - means_dict = { - lab: np.array(list(lab_sd.values())).mean() for lab, lab_sd in test_summary_dict.items() - } - best = max(means_dict.values()) - - pc = 0.3 - n = int(len(means_dict) * pc) - print("top n: ", n) - - top_pc = sorted(means_dict.values())[-n:] - bottom_pc = sorted(means_dict.values())[:n] - - print("Legend:") - cprint("\tbest", "green") - cprint("\ttop {} %".format(pc), "blue") - cprint("\tbottom {} %".format(pc), "red") - print("\tothers") - print() - - for l, l_mean in sorted(means_dict.items(), key=lambda kv: kv[1]): - - l_summary_dict = test_summary_dict[l] - - c = test_summary_dict_colors[l] - print("label: {} ({})".format(l, c)) - - #print("\t{}({}) - Mean".format(l_mean, metric)) - - if l_mean == best: - cprint("\t{}({}) - Mean".format(l_mean, eval_metric), "green") - - elif l_mean in top_pc: - cprint("\t{}({}) - Mean".format(l_mean, eval_metric), "blue") - - elif l_mean in bottom_pc: - cprint("\t{}({}) - Mean".format(l_mean, eval_metric), "red") - - else: - print("\t{}({})".format(l_mean, eval_metric)) - - n_over_50 = 0 - - if sort_test: - sorted_envs = sorted(l_summary_dict.items(), key=lambda kv: sort_test_set(env_name=kv[0])) - else: - sorted_envs = l_summary_dict.items() - - for tenv, p in sorted_envs: - if p < 0.5: - print("\t{:4f}({}) - \t{}".format(p, eval_metric, tenv)) - else: - print("\t{:4f}({}) -*\t{}".format(p, eval_metric, tenv)) - n_over_50 += 1 - print("\tenv over 50 - {}/{}".format(n_over_50, len(l_summary_dict))) - - if plot_test: - plt.tight_layout() - # plt.subplots_adjust(hspace=0.8, wspace=0.15, left=0.035, right=0.99, bottom=0.065, top=0.93) - plt.show() - - if eval_filename is not None: - plt.subplots_adjust(hspace=0.8, wspace=0.15, left=0.15, right=0.99, bottom=0.15, top=0.93) - - res= input(f"Save to {eval_filename} (y/n)?") - if res == "y": - f.savefig(eval_filename) - print(f'saved to {eval_filename}') - else: - print('not saved') diff --git 
a/spaces/geniius/ogkalu-Comic-Diffusion/app.py b/spaces/geniius/ogkalu-Comic-Diffusion/app.py deleted file mode 100644 index dacd1f283828ac4113a91b1d67e6009ba63762dc..0000000000000000000000000000000000000000 --- a/spaces/geniius/ogkalu-Comic-Diffusion/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/ogkalu/Comic-Diffusion").launch() \ No newline at end of file diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_context.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_context.py deleted file mode 100644 index ff65bad1b86d7e3a5980bb5b9fc55798dc8df5f4..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_context.py +++ /dev/null @@ -1,60 +0,0 @@ -# dataset settings -dataset_type = 'PascalContextDataset' -data_root = 'data/VOCdevkit/VOC2010/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -img_scale = (520, 520) -crop_size = (480, 480) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale, - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=4, - workers_per_gpu=4, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClassContext', - split='ImageSets/SegmentationContext/train.txt', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClassContext', - split='ImageSets/SegmentationContext/val.txt', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClassContext', - split='ImageSets/SegmentationContext/val.txt', - pipeline=test_pipeline)) diff --git a/spaces/gotiQspiryo/whisper-ui/examples/1000 Kambi Kathakal Pdf Free VERIFIEDl.md b/spaces/gotiQspiryo/whisper-ui/examples/1000 Kambi Kathakal Pdf Free VERIFIEDl.md deleted file mode 100644 index 327df5351d4d06485106d67f5571a6bd147cf6f6..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/1000 Kambi Kathakal Pdf Free VERIFIEDl.md +++ /dev/null @@ -1,6 +0,0 @@ -

-1000 Kambi Kathakal Pdf Freel
-Download Zip https://urlgoal.com/2uyMuj
- aaccfb2cb3
-
-
-

      diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Constantine 2 full movie in hindi free download Find out what happens next in the saga of the anti-hero.md b/spaces/gotiQspiryo/whisper-ui/examples/Constantine 2 full movie in hindi free download Find out what happens next in the saga of the anti-hero.md deleted file mode 100644 index 63ecffcdab669e13ca5cb531d8170e03dba873dd..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Constantine 2 full movie in hindi free download Find out what happens next in the saga of the anti-hero.md +++ /dev/null @@ -1,6 +0,0 @@ -

-Constantine 2 full movie in hindi free download
-Download Zip ===> https://urlgoal.com/2uyNj8
- aaccfb2cb3
-
-
-

      diff --git a/spaces/gradio/HuBERT/docs/Makefile b/spaces/gradio/HuBERT/docs/Makefile deleted file mode 100644 index c2f5b1a89cfc9e02d1bb09027d9e1e520ba53d53..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = python -msphinx -SPHINXPROJ = fairseq -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/spaces/gradio/HuBERT/examples/byte_level_bpe/get_data.sh b/spaces/gradio/HuBERT/examples/byte_level_bpe/get_data.sh deleted file mode 100644 index c3d55d4925a6e6e23d12d293f093c1ae14acf76e..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/byte_level_bpe/get_data.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -PY_BIN_ROOT= - -# PyPI dependency -${PY_BIN_ROOT}pip install sentencepiece sacremoses - -# Get data -if [ ! -d "data" ]; then - mkdir data -fi - -if [ ! -f "data/fr-en.tgz" ]; then - wget https://wit3.fbk.eu/archive/2017-01-trnted/texts/fr/en/fr-en.tgz -P data - tar xvf data/fr-en.tgz -C data -fi -${PY_BIN_ROOT}python get_bitext.py --bpe-vocab 16384 --byte-vocab --char-vocab -for VOCAB_SIZE in 2048 4096; do - ${PY_BIN_ROOT}python get_bitext.py --bpe-vocab ${VOCAB_SIZE} --bbpe-vocab ${VOCAB_SIZE} -done -rm -r data/fr-en data/fr-en.tgz - -# Generate binary dataset -${PY_BIN_ROOT}/fairseq-preprocess --source-lang fr --target-lang en --destdir data/bin_bpe16384 --joined-dictionary \ - --workers "$(nproc)" --trainpref data/train.moses.bpe16384 --validpref data/valid.moses.bpe16384 \ - --testpref data/test.moses.bpe16384 - -${PY_BIN_ROOT}/fairseq-preprocess --source-lang fr --target-lang en --destdir data/bin_bytes --joined-dictionary \ - --workers "$(nproc)" --trainpref data/train.moses.bytes --validpref data/valid.moses.bytes \ - --testpref data/test.moses.bytes - -${PY_BIN_ROOT}/fairseq-preprocess --source-lang fr --target-lang en --destdir data/bin_chars --joined-dictionary \ - --workers "$(nproc)" --trainpref data/train.moses.chars --validpref data/valid.moses.chars \ - --testpref data/test.moses.chars - -for VOCAB_SIZE in 2048 4096; do - for TYPE in bbpe bpe; do - ${PY_BIN_ROOT}/fairseq-preprocess --source-lang fr --target-lang en --destdir "data/bin_${TYPE}${VOCAB_SIZE}" \ - --joined-dictionary --workers "$(nproc)" --trainpref "data/train.moses.${TYPE}${VOCAB_SIZE}" \ - --validpref "data/valid.moses.${TYPE}${VOCAB_SIZE}" --testpref "data/test.moses.${TYPE}${VOCAB_SIZE}" - done -done diff --git a/spaces/gradio/HuBERT/examples/simultaneous_translation/utils/p_choose_strategy.py b/spaces/gradio/HuBERT/examples/simultaneous_translation/utils/p_choose_strategy.py deleted file mode 100644 index 308227ed96d8ee94b66bc0df343c96abbe2c55cc..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/simultaneous_translation/utils/p_choose_strategy.py +++ 
/dev/null @@ -1,124 +0,0 @@ -from typing import Optional, Dict -from torch import Tensor -import torch - - -def waitk( - query, key, waitk_lagging: int, num_heads: int, key_padding_mask: Optional[Tensor] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None -): - if incremental_state is not None: - # Retrieve target length from incremental states - # For inference the length of query is always 1 - tgt_len = incremental_state["steps"]["tgt"] - assert tgt_len is not None - tgt_len = int(tgt_len) - else: - tgt_len, bsz, _ = query.size() - - max_src_len, bsz, _ = key.size() - - if max_src_len < waitk_lagging: - if incremental_state is not None: - tgt_len = 1 - return query.new_zeros( - bsz * num_heads, tgt_len, max_src_len - ) - - # Assuming the p_choose looks like this for wait k=3 - # src_len = 6, tgt_len = 5 - # [0, 0, 1, 0, 0, 0, 0] - # [0, 0, 0, 1, 0, 0, 0] - # [0, 0, 0, 0, 1, 0, 0] - # [0, 0, 0, 0, 0, 1, 0] - # [0, 0, 0, 0, 0, 0, 1] - # linearize the p_choose matrix: - # [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0...] - # The indices of linearized matrix that equals 1 is - # 2 + 6 * 0 - # 3 + 6 * 1 - # ... - # n + src_len * n + k - 1 = n * (src_len + 1) + k - 1 - # n from 0 to tgt_len - 1 - # - # First, generate the indices (activate_indices_offset: bsz, tgt_len) - # Second, scatter a zeros tensor (bsz, tgt_len * src_len) - # with activate_indices_offset - # Third, resize the tensor to (bsz, tgt_len, src_len) - - activate_indices_offset = ( - ( - torch.arange(tgt_len) * (max_src_len + 1) - + waitk_lagging - 1 - ) - .unsqueeze(0) - .expand(bsz, tgt_len) - .to(query) - .long() - ) - - if key_padding_mask is not None: - if key_padding_mask[:, 0].any(): - # Left padding - activate_indices_offset += ( - key_padding_mask.sum(dim=1, keepdim=True) - ) - - # Need to clamp the indices that are too large - activate_indices_offset = ( - activate_indices_offset - .clamp( - 0, - min( - [ - tgt_len, - max_src_len - waitk_lagging + 1 - ] - ) * max_src_len - 1 - ) - ) - - p_choose = torch.zeros(bsz, tgt_len * max_src_len).to(query) - - p_choose = p_choose.scatter( - 1, - activate_indices_offset, - 1.0 - ).view(bsz, tgt_len, max_src_len) - - if incremental_state is not None: - p_choose = p_choose[:, -1:] - tgt_len = 1 - - # Extend to each head - p_choose = ( - p_choose.contiguous() - .unsqueeze(1) - .expand(-1, num_heads, -1, -1) - .contiguous() - .view(-1, tgt_len, max_src_len) - ) - - return p_choose - - -def hard_aligned(q_proj: Optional[Tensor], k_proj: Optional[Tensor], attn_energy, noise_mean: float = 0.0, noise_var: float = 0.0, training: bool = True): - """ - Calculating step wise prob for reading and writing - 1 to read, 0 to write - """ - - noise = 0 - if training: - # add noise here to encourage discretness - noise = ( - torch.normal(noise_mean, noise_var, attn_energy.size()) - .type_as(attn_energy) - .to(attn_energy.device) - ) - - p_choose = torch.sigmoid(attn_energy + noise) - _, _, tgt_len, src_len = p_choose.size() - - # p_choose: bsz * self.num_heads, tgt_len, src_len - return p_choose.view(-1, tgt_len, src_len) diff --git a/spaces/gradio/HuBERT/examples/speech_recognition/criterions/ASG_loss.py b/spaces/gradio/HuBERT/examples/speech_recognition/criterions/ASG_loss.py deleted file mode 100644 index 41f50bbd70388ce723f2d316d4e9776bcd6be3c9..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/speech_recognition/criterions/ASG_loss.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. 
and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from examples.speech_recognition.data.replabels import pack_replabels -from fairseq import utils -from fairseq.criterions import FairseqCriterion, register_criterion - - -@register_criterion("asg_loss") -class ASGCriterion(FairseqCriterion): - @staticmethod - def add_args(parser): - group = parser.add_argument_group("ASG Loss") - group.add_argument( - "--asg-transitions-init", - help="initial diagonal value of transition matrix", - type=float, - default=0.0, - ) - group.add_argument( - "--max-replabel", help="maximum # of replabels", type=int, default=2 - ) - group.add_argument( - "--linseg-updates", - help="# of training updates to use LinSeg initialization", - type=int, - default=0, - ) - group.add_argument( - "--hide-linseg-messages", - help="hide messages about LinSeg initialization", - action="store_true", - ) - - def __init__( - self, - task, - silence_token, - asg_transitions_init, - max_replabel, - linseg_updates, - hide_linseg_messages, - ): - from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode - - super().__init__(task) - self.tgt_dict = task.target_dictionary - self.eos = self.tgt_dict.eos() - self.silence = ( - self.tgt_dict.index(silence_token) - if silence_token in self.tgt_dict - else None - ) - self.max_replabel = max_replabel - - num_labels = len(self.tgt_dict) - self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT) - self.asg.trans = torch.nn.Parameter( - asg_transitions_init * torch.eye(num_labels), requires_grad=True - ) - - self.linseg_progress = torch.nn.Parameter( - torch.tensor([0], dtype=torch.int), requires_grad=False - ) - self.linseg_maximum = linseg_updates - self.linseg_message_state = "none" if hide_linseg_messages else "start" - - @classmethod - def build_criterion(cls, args, task): - return cls( - task, - args.silence_token, - args.asg_transitions_init, - args.max_replabel, - args.linseg_updates, - args.hide_linseg_messages, - ) - - def linseg_step(self): - if not self.training: - return False - if self.linseg_progress.item() < self.linseg_maximum: - if self.linseg_message_state == "start": - print("| using LinSeg to initialize ASG") - self.linseg_message_state = "finish" - self.linseg_progress.add_(1) - return True - elif self.linseg_message_state == "finish": - print("| finished LinSeg initialization") - self.linseg_message_state = "none" - return False - - def replace_eos_with_silence(self, tgt): - if tgt[-1] != self.eos: - return tgt - elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence): - return tgt[:-1] - else: - return tgt[:-1] + [self.silence] - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. 
- - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - - net_output = model(**sample["net_input"]) - emissions = net_output["encoder_out"].transpose(0, 1).contiguous() - B = emissions.size(0) - T = emissions.size(1) - device = emissions.device - - target = torch.IntTensor(B, T) - target_size = torch.IntTensor(B) - using_linseg = self.linseg_step() - - for b in range(B): - initial_target_size = sample["target_lengths"][b].item() - if initial_target_size == 0: - raise ValueError("target size cannot be zero") - - tgt = sample["target"][b, :initial_target_size].tolist() - tgt = self.replace_eos_with_silence(tgt) - tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel) - tgt = tgt[:T] - - if using_linseg: - tgt = [tgt[t * len(tgt) // T] for t in range(T)] - - target[b][: len(tgt)] = torch.IntTensor(tgt) - target_size[b] = len(tgt) - - loss = self.asg.forward(emissions, target.to(device), target_size.to(device)) - - if reduce: - loss = torch.sum(loss) - - sample_size = ( - sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"] - ) - logging_output = { - "loss": utils.item(loss.data) if reduce else loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["target"].size(0), - "sample_size": sample_size, - } - return loss, sample_size, logging_output - - @staticmethod - def aggregate_logging_outputs(logging_outputs): - """Aggregate logging outputs from data parallel training.""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - agg_output = { - "loss": loss_sum / nsentences, - "ntokens": ntokens, - "nsentences": nsentences, - "sample_size": sample_size, - } - return agg_output diff --git a/spaces/grisiemjahand/Image-and-3D-Model-Creator/PIFu/lib/sample_util.py b/spaces/grisiemjahand/Image-and-3D-Model-Creator/PIFu/lib/sample_util.py deleted file mode 100644 index d0b105d148d6d8fddc461d1c04f659200957c189..0000000000000000000000000000000000000000 --- a/spaces/grisiemjahand/Image-and-3D-Model-Creator/PIFu/lib/sample_util.py +++ /dev/null @@ -1,47 +0,0 @@ -import numpy as np - - -def save_samples_truncted_prob(fname, points, prob): - ''' - Save the visualization of sampling to a ply file. - Red points represent positive predictions. - Green points represent negative predictions. - :param fname: File name to save - :param points: [N, 3] array of points - :param prob: [N, 1] array of predictions in the range [0~1] - :return: - ''' - r = (prob > 0.5).reshape([-1, 1]) * 255 - g = (prob < 0.5).reshape([-1, 1]) * 255 - b = np.zeros(r.shape) - - to_save = np.concatenate([points, r, g, b], axis=-1) - return np.savetxt(fname, - to_save, - fmt='%.6f %.6f %.6f %d %d %d', - comments='', - header=( - 'ply\nformat ascii 1.0\nelement vertex {:d}\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nend_header').format( - points.shape[0]) - ) - - -def save_samples_rgb(fname, points, rgb): - ''' - Save the visualization of sampling to a ply file. - Red points represent positive predictions. - Green points represent negative predictions. 
- :param fname: File name to save - :param points: [N, 3] array of points - :param rgb: [N, 3] array of rgb values in the range [0~1] - :return: - ''' - to_save = np.concatenate([points, rgb * 255], axis=-1) - return np.savetxt(fname, - to_save, - fmt='%.6f %.6f %.6f %d %d %d', - comments='', - header=( - 'ply\nformat ascii 1.0\nelement vertex {:d}\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nend_header').format( - points.shape[0]) - ) diff --git a/spaces/grupo10/risk-of-death-in-road-incident/README.md b/spaces/grupo10/risk-of-death-in-road-incident/README.md deleted file mode 100644 index 951375795c15b98de166c436eaa9da2fb753913b..0000000000000000000000000000000000000000 --- a/spaces/grupo10/risk-of-death-in-road-incident/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Risk Of Death In Road Incident -emoji: 📚 -colorFrom: purple -colorTo: blue -sdk: streamlit -sdk_version: 1.26.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/model_irse.py b/spaces/gyugnsu/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/model_irse.py deleted file mode 100644 index daa7a98457de533545a16b2e09030d8414c5b00e..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/model_irse.py +++ /dev/null @@ -1,91 +0,0 @@ -from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module -from encoder4editing.models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm - -""" -Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) -""" - - -class Backbone(Module): - def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True): - super(Backbone, self).__init__() - assert input_size in [112, 224], "input_size should be 112 or 224" - assert num_layers in [ - 50, 100, 152], "num_layers should be 50, 100 or 152" - assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se" - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - if input_size == 112: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 7 * 7, 512), - BatchNorm1d(512, affine=affine)) - else: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 14 * 14, 512), - BatchNorm1d(512, affine=affine)) - - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - def forward(self, x): - x = self.input_layer(x) - x = self.body(x) - x = self.output_layer(x) - return l2_norm(x) - - -def IR_50(input_size): - """Constructs a ir-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir', - drop_ratio=0.4, affine=False) - return model - - -def IR_101(input_size): - """Constructs a ir-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir', - drop_ratio=0.4, affine=False) - return model - - -def IR_152(input_size): - """Constructs a ir-152 model.""" - model = Backbone(input_size, 
num_layers=152, mode='ir', - drop_ratio=0.4, affine=False) - return model - - -def IR_SE_50(input_size): - """Constructs a ir_se-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir_se', - drop_ratio=0.4, affine=False) - return model - - -def IR_SE_101(input_size): - """Constructs a ir_se-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir_se', - drop_ratio=0.4, affine=False) - return model - - -def IR_SE_152(input_size): - """Constructs a ir_se-152 model.""" - model = Backbone(input_size, num_layers=152, mode='ir_se', - drop_ratio=0.4, affine=False) - return model diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/bugs.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/bugs.md deleted file mode 100644 index 52d299886a457480d27c54a27734a704786a1d28..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/bugs.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -name: "🐛 Bugs" -about: Report bugs in detectron2 -title: Please read & provide the following - ---- - -## Instructions To Reproduce the 🐛 Bug: - -1. what changes you made (`git diff`) or what code you wrote -``` - -``` -2. what exact command you run: -3. what you observed (including __full logs__): -``` - -``` -4. please simplify the steps as much as possible so they do not require additional resources to - run, such as a private dataset. - -## Expected behavior: - -If there are no obvious error in "what you observed" provided above, -please tell us the expected behavior. - -## Environment: - -Provide your environment information using the following command: -``` -wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py -``` - -If your issue looks like an installation issue / environment issue, -please first try to solve it yourself with the instructions in -https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/engine/launch.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/engine/launch.py deleted file mode 100644 index 9efbb0395d2c788d8cfe2cbbf66cde6ddc053585..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/engine/launch.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import logging -import torch -import torch.distributed as dist -import torch.multiprocessing as mp - -from detectron2.utils import comm - -__all__ = ["launch"] - - -def _find_free_port(): - import socket - - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - # Binding to port 0 will cause the OS to find an available port for us - sock.bind(("", 0)) - port = sock.getsockname()[1] - sock.close() - # NOTE: there is still a chance the port could be taken by other processes. 
- return port - - -def launch(main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=()): - """ - Args: - main_func: a function that will be called by `main_func(*args)` - num_machines (int): the total number of machines - machine_rank (int): the rank of this machine (one per machine) - dist_url (str): url to connect to for distributed jobs, including protocol - e.g. "tcp://127.0.0.1:8686". - Can be set to "auto" to automatically select a free port on localhost - args (tuple): arguments passed to main_func - """ - world_size = num_machines * num_gpus_per_machine - if world_size > 1: - # https://github.com/pytorch/pytorch/pull/14391 - # TODO prctl in spawned processes - - if dist_url == "auto": - assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs." - port = _find_free_port() - dist_url = f"tcp://127.0.0.1:{port}" - if num_machines > 1 and dist_url.startswith("file://"): - logger = logging.getLogger(__name__) - logger.warning( - "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://" - ) - - mp.spawn( - _distributed_worker, - nprocs=num_gpus_per_machine, - args=(main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args), - daemon=False, - ) - else: - main_func(*args) - - -def _distributed_worker( - local_rank, main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args -): - assert torch.cuda.is_available(), "cuda is not available. Please check your installation." - global_rank = machine_rank * num_gpus_per_machine + local_rank - try: - dist.init_process_group( - backend="NCCL", init_method=dist_url, world_size=world_size, rank=global_rank - ) - except Exception as e: - logger = logging.getLogger(__name__) - logger.error("Process group URL: {}".format(dist_url)) - raise e - # synchronize is needed here to prevent a possible timeout after calling init_process_group - # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 - comm.synchronize() - - assert num_gpus_per_machine <= torch.cuda.device_count() - torch.cuda.set_device(local_rank) - - # Setup the local process group (which contains ranks within the same machine) - assert comm._LOCAL_PROCESS_GROUP is None - num_machines = world_size // num_gpus_per_machine - for i in range(num_machines): - ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine)) - pg = dist.new_group(ranks_on_i) - if i == machine_rank: - comm._LOCAL_PROCESS_GROUP = pg - - main_func(*args) diff --git a/spaces/hasibzunair/fifa-tryon-demo/predict_pose.py b/spaces/hasibzunair/fifa-tryon-demo/predict_pose.py deleted file mode 100644 index 14feb4b71b098afee1f02711322b14fe3fc39763..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/predict_pose.py +++ /dev/null @@ -1,105 +0,0 @@ -import cv2 -import numpy as np -import os -import json - -class general_pose_model(object): - def __init__(self, modelpath): - # Specify the model to be used - # Body25: 25 points - # COCO: 18 points - # MPI: 15 points - self.inWidth = 368 - self.inHeight = 368 - self.threshold = 0.05 - self.pose_net = self.general_coco_model(modelpath) - - def general_coco_model(self, modelpath): - self.points_name = { - "Nose": 0, "Neck": 1, - "RShoulder": 2, "RElbow": 3, "RWrist": 4, - "LShoulder": 5, "LElbow": 6, "LWrist": 7, - "RHip": 8, "RKnee": 9, "RAnkle": 10, - "LHip": 11, "LKnee": 12, "LAnkle": 13, - "REye": 14, "LEye": 15, - "REar": 16, "LEar": 17, - "Background": 18} - self.num_points = 18 - self.point_pairs = [[1, 0], [1, 2], 
[1, 5], - [2, 3], [3, 4], [5, 6], - [6, 7], [1, 8], [8, 9], - [9, 10], [1, 11], [11, 12], - [12, 13], [0, 14], [0, 15], - [14, 16], [15, 17]] - prototxt = os.path.join( - modelpath, - 'pose_deploy_linevec.prototxt') - caffemodel = os.path.join( - modelpath, - 'pose_iter_440000.caffemodel') - coco_model = cv2.dnn.readNetFromCaffe(prototxt, caffemodel) - - return coco_model - - def predict(self, imgfile): - img_cv2 = cv2.imread(imgfile) - img_height, img_width, _ = img_cv2.shape - inpBlob = cv2.dnn.blobFromImage(img_cv2, - 1.0 / 255, - (self.inWidth, self.inHeight), - (0, 0, 0), - swapRB=False, - crop=False) - self.pose_net.setInput(inpBlob) - self.pose_net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV) - self.pose_net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL) - - output = self.pose_net.forward() - - H = output.shape[2] - W = output.shape[3] - - points = [] - for idx in range(self.num_points): - probMap = output[0, idx, :, :] # confidence map. - - # Find global maxima of the probMap. - minVal, prob, minLoc, point = cv2.minMaxLoc(probMap) - - # Scale the point to fit on the original image - x = (img_width * point[0]) / W - y = (img_height * point[1]) / H - - if prob > self.threshold: - points.append(x) - points.append(y) - points.append(prob) - else: - points.append(0) - points.append(0) - points.append(0) - - return points - -def generate_pose_keypoints(img_file, pose_file): - - modelpath = 'pose' - pose_model = general_pose_model(modelpath) - - res_points = pose_model.predict(img_file) - - pose_data = {"version": 1, - "people": [ - {"pose_keypoints": res_points} - ] - } - - pose_keypoints_path = pose_file - - json_object = json.dumps(pose_data, indent = 4) - - # Writing to sample.json - with open(pose_keypoints_path, "w") as outfile: - outfile.write(json_object) - print('File saved at {}'.format(pose_keypoints_path)) - diff --git a/spaces/hf-accelerate/accelerate_examples/src/app.py b/spaces/hf-accelerate/accelerate_examples/src/app.py deleted file mode 100644 index 5f2cb959cac705b8940c854366b358d6c3fa9d9c..0000000000000000000000000000000000000000 --- a/spaces/hf-accelerate/accelerate_examples/src/app.py +++ /dev/null @@ -1,178 +0,0 @@ -from contextlib import contextmanager - -import gradio as gr -from markup import get_text, highlight -from template import get_templates - - -templates = get_templates() - - -def fill_tab(title, explanation): - """ - Fill the tab with the appropriate title and explanation. - """ - return gr.Markdown(title), gr.Markdown(explanation) - - -@contextmanager -def new_section(): - """ - A context manager to create a new section in the interface. Equivalent of: - ```python - with gr.Row(): - with gr.Column(): - ... - ``` - """ - with gr.Row(): - with gr.Column(): - yield - - -def change(inp, textbox): - """Based on an `inp`, render and highlight the appropriate code sample. - - Args: - inp (`str`): - The input button from the interface. - textbox (`str`): - The textbox specifying the tab name from the interface. - - Returns: - `tuple`: A tuple of the highlighted code diff, and the title for the section. 
- """ - if textbox == "base": - code, explanation, docs = get_text(inp, textbox) - if inp == "Basic": - return ( - highlight(code), - "## Accelerate Code (Base Integration)", - explanation, - docs, - ) - elif inp == "Calculating Metrics": - return (highlight(code), f"## Accelerate Code ({inp})", explanation, docs) - else: - return (highlight(code), f"## Accelerate Code ({inp})", explanation, docs) - elif textbox == "training_configuration": - yaml, changes, command, explanation, docs = get_text(inp, textbox) - return (highlight(yaml), highlight(changes), command, explanation, docs) - else: - raise ValueError(f"Invalid tab name: {textbox}") - - -default_base = change("Basic", "base") -default_training_config = change("Multi GPU", "training_configuration") - - -def base_features(textbox): - inp = gr.Radio( - [ - "Basic", - "Calculating Metrics", - "Checkpointing", - "Experiment Tracking", - "Gradient Accumulation", - ], - label="Select a feature you would like to integrate", - value="Basic", - ) - with new_section(): - feature, out = fill_tab("## Accelerate Code", default_base[0]) - with new_section(): - _, explanation = fill_tab("## Explanation", default_base[2]) - with new_section(): - _, docs = fill_tab("## Documentation Links", default_base[3]) - inp.change( - fn=change, inputs=[inp, textbox], outputs=[out, feature, explanation, docs] - ) - - -def training_config(textbox): - inp = gr.Radio( - [ - "AWS SageMaker", - "DeepSpeed", - "Megatron-LM", - "Multi GPU", - "Multi Node Multi GPU", - "PyTorch FSDP", - ], - label="Select a distributed YAML configuration you would like to view.", - value="Multi GPU", - ) - with new_section(): - _, yaml = fill_tab("## Example YAML Configuration", default_training_config[0]) - with new_section(): - _, changes = fill_tab( - "## Changes to Training Script", default_training_config[1] - ) - with new_section(): - _, command = fill_tab("## Command to Run Training", default_training_config[2]) - with new_section(): - _, explanation = fill_tab("## Explanation", default_training_config[3]) - with new_section(): - _, docs = fill_tab("## Documentation Links", default_training_config[4]) - inp.change( - fn=change, - inputs=[inp, textbox], - outputs=[yaml, changes, command, explanation, docs], - ) - - -# def big_model_inference(): -# inp = gr.Radio( -# ["Accelerate's Big Model Inference",], # "DeepSpeed ZeRO Stage-3 Offload" -# label="Select a feature you would like to integrate", -# value="Basic", -# ) -# with gr.Row(): -# with gr.Column(): -# feature = gr.Markdown("## Accelerate Code") -# out = gr.Markdown(default[0]) -# with gr.Row(): -# with gr.Column(): -# gr.Markdown(default[1]) -# explanation = gr.Markdown(default[2]) -# with gr.Row(): -# with gr.Column(): -# gr.Markdown("## Documentation Links") -# docs = gr.Markdown(default[3]) -# inp.change(fn=change, inputs=[inp, "big_model_inference"], outputs=[out, feature, explanation, docs]) - - -# def notebook_launcher(): -# inp = gr.Radio( -# ["Colab GPU", "Colab TPU", "Kaggle GPU", "Kaggle Multi GPU", "Kaggle TPU", "Multi GPU VMs"], -# label="Select a feature you would like to integrate", -# value="Basic", -# ) -# with gr.Row(): -# with gr.Column(): -# feature = gr.Markdown("## Accelerate Code") -# out = gr.Markdown(default[0]) -# with gr.Row(): -# with gr.Column(): -# gr.Markdown(default[1]) -# explanation = gr.Markdown(default[2]) -# with gr.Row(): -# with gr.Column(): -# gr.Markdown("## Documentation Links") -# docs = gr.Markdown(default[3]) -# inp.change(fn=change, inputs=[inp, "notebook_launcher"], 
outputs=[out, feature, explanation, docs]) - - -with gr.Blocks() as demo: - - with gr.Tabs(): - with gr.TabItem("Basic Training Integration"): - textbox = gr.Textbox(label="tab_name", visible=False, value="base") - base_features(textbox) - with gr.TabItem("Launch Configuration"): - textbox = gr.Textbox( - label="tab_name", visible=False, value="training_configuration" - ) - training_config(textbox) - -demo.launch() diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/alternative_experiment_planning/__init__.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/alternative_experiment_planning/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/learning_rate/__init__.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/learning_rate/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_DiceCE_noSmooth.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_DiceCE_noSmooth.py deleted file mode 100644 index 204ee5912a041b876351d739d8b2f1300a2a086d..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_DiceCE_noSmooth.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 -from nnunet.training.loss_functions.dice_loss import SoftDiceLoss, DC_and_CE_loss - - -class nnUNetTrainerV2_Loss_DiceCE_noSmooth(nnUNetTrainerV2): - def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, - unpack_data=True, deterministic=True, fp16=False): - super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, - deterministic, fp16) - self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 0, 'do_bg': False}, {}) - - diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/utils_new/inspectfiles.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/utils_new/inspectfiles.py deleted file mode 100644 index f98e3622ae6e8c4e5730cbf116737479f1095db6..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/utils_new/inspectfiles.py +++ /dev/null @@ -1,19 +0,0 @@ -import numpy as np -import os -import matplotlib.pyplot as plt -import _pickle as cPickle - -path = '/home/ho11laqe/PycharmProjects/nnUNet_data/nnUNet_preprocessed/Task505_Glacier_mtl_boundary/nnUNetData_plans_mtl_2D_stage0/' -liste = os.listdir(path) -print(liste) -file = np.load(path + liste[0]) -with open(path+liste[1], "rb") as pkl_file: - pkl = cPickle.load(pkl_file) - print() - -data = file['data'] -plt.imshow(data[0][0]) -plt.show() -plt.imshow(data[0][0]) -plt.show() -print() \ No newline at end of file diff --git a/spaces/hoang1007/wav2vec2/src/datamodule/__init__.py b/spaces/hoang1007/wav2vec2/src/datamodule/__init__.py deleted file mode 100644 index d9e6625ca27d24868463bf875b776a6e27041261..0000000000000000000000000000000000000000 --- a/spaces/hoang1007/wav2vec2/src/datamodule/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .vlsp2020 import ( - VLSP2020TarDataset, - VLSP2020Dataset, -) diff --git a/spaces/hosst/ApplianceLLM/README.md b/spaces/hosst/ApplianceLLM/README.md deleted file mode 100644 index 91a1a940b6701b855d5f990cbe26cf794b4eae1d..0000000000000000000000000000000000000000 --- a/spaces/hosst/ApplianceLLM/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ApplianceLLM -emoji: ⚡ -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: bigscience-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/housexu123/bingo-2.0/src/lib/bots/bing/sr.ts b/spaces/housexu123/bingo-2.0/src/lib/bots/bing/sr.ts deleted file mode 100644 index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000 --- a/spaces/housexu123/bingo-2.0/src/lib/bots/bing/sr.ts +++ /dev/null @@ -1,106 +0,0 @@ -// @ts-ignore -const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? ( - // @ts-ignore - window.SpeechRecognition || - window.webkitSpeechRecognition || - // @ts-ignore - window.mozSpeechRecognition || - // @ts-ignore - window.msSpeechRecognition || - // @ts-ignore - window.oSpeechRecognition -) as typeof webkitSpeechRecognition : undefined - -type subscriber = (msg: string, command?: string) => void - -export class SR { - recognition?: SpeechRecognition - onchange?: subscriber - transcript: boolean = false - listening: boolean = false - private commandsRe?: RegExp - constructor(commands: string[]) { - this.recognition = SpeechRecognitionPolyfill ? 
new SpeechRecognitionPolyfill() : undefined - if (!this.recognition) { - return - } - this.configuration('zh-CN') - if (commands.length) { - this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`) - } - this.recognition.onresult = this.speechRecognition - this.recognition.onerror = (err) => { - console.log('err', err.error) - this.stop() - } - this.recognition.onend = () => { - if (this.recognition && this.listening) { - this.recognition.start() - } - } - } - - speechRecognition = (event: SpeechRecognitionEvent) => { - if (!this.listening) return - for (var i = event.resultIndex; i < event.results.length; i++) { - let result = event.results[i] - if (result.isFinal) { - var alt = result[0] - const text = alt.transcript.trim() - if (this.commandsRe && this.commandsRe.test(text)) { - return this.onchange?.('', RegExp.$1) - } - if (!this.transcript) return - this.onchange?.(text) - } - } - } - - private configuration = async (lang: string = 'zh-CN') => { - return new Promise((resolve) => { - if (this.recognition) { - this.recognition.continuous = true - this.recognition.lang = lang - this.recognition.onstart = resolve - } - }) - } - - start = async () => { - if (this.recognition && !this.listening) { - await this.recognition.start() - this.transcript = true - this.listening = true - } - } - - stop = () => { - if (this.recognition) { - this.recognition.stop() - this.transcript = false - this.listening = false - } - } - - - pause = () => { - if (this.recognition) { - this.transcript = false - } - } - - resume = () => { - if (this.recognition) { - this.transcript = true - } - } - - abort = () => { - if (this.recognition && this.transcript) { - this.recognition.abort() - this.transcript = false - this.listening = false - } - } -} - diff --git a/spaces/huaiji3y/bingo-Public/src/lib/bots/bing/utils.ts b/spaces/huaiji3y/bingo-Public/src/lib/bots/bing/utils.ts deleted file mode 100644 index 6bbbc5e463ad55bc1219b63cf78013f5360fc908..0000000000000000000000000000000000000000 --- a/spaces/huaiji3y/bingo-Public/src/lib/bots/bing/utils.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { ChatResponseMessage, BingChatResponse } from './types' - -export function convertMessageToMarkdown(message: ChatResponseMessage): string { - if (message.messageType === 'InternalSearchQuery') { - return message.text - } - for (const card of message.adaptiveCards??[]) { - for (const block of card.body) { - if (block.type === 'TextBlock') { - return block.text - } - } - } - return '' -} - -const RecordSeparator = String.fromCharCode(30) - -export const websocketUtils = { - packMessage(data: any) { - return `${JSON.stringify(data)}${RecordSeparator}` - }, - unpackMessage(data: string | ArrayBuffer | Blob) { - if (!data) return {} - return data - .toString() - .split(RecordSeparator) - .filter(Boolean) - .map((s) => { - try { - return JSON.parse(s) - } catch (e) { - return {} - } - }) - }, -} - -export async function createImage(prompt: string, id: string, headers: HeadersInit): Promise { - const { headers: responseHeaders } = await fetch(`https://www.bing.com/images/create?partner=sydney&re=1&showselective=1&sude=1&kseed=7000&SFX=&q=${encodeURIComponent(prompt)}&iframeid=${id}`, - { - method: 'HEAD', - headers, - redirect: 'manual' - }, - ); - - if (!/&id=([^&]+)$/.test(responseHeaders.get('location') || '')) { - throw new Error('请求异常,请检查身份信息是否有效') - } - - const resultId = RegExp.$1; - let count = 0 - const imageThumbUrl = 
`https://www.bing.com/images/create/async/results/${resultId}?q=${encodeURIComponent(prompt)}&partner=sydney&showselective=1&IID=images.as`; - - do { - await sleep(3000); - const content = await fetch(imageThumbUrl, { headers, method: 'GET' }) - - // @ts-ignore - if (content.headers.get('content-length') > 1) { - const text = await content.text() - return (text?.match(/ target?.split('src="').pop()?.replace(/&/g, '&')) - .map(img => `![${prompt}](${img})`).join(' ') - } - } while(count ++ < 10); -} - - -export async function* streamAsyncIterable(stream: ReadableStream) { - const reader = stream.getReader() - try { - while (true) { - const { done, value } = await reader.read() - if (done) { - return - } - yield value - } - } finally { - reader.releaseLock() - } -} - -export const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms)) - diff --git a/spaces/huggingchat/chat-ui/src/routes/login/callback/updateUser.spec.ts b/spaces/huggingchat/chat-ui/src/routes/login/callback/updateUser.spec.ts deleted file mode 100644 index 24a7ff2757519d26bd9af7c163cdd76fc25bf69e..0000000000000000000000000000000000000000 --- a/spaces/huggingchat/chat-ui/src/routes/login/callback/updateUser.spec.ts +++ /dev/null @@ -1,144 +0,0 @@ -import { assert, it, describe, afterEach, vi, expect } from "vitest"; -import type { Cookies } from "@sveltejs/kit"; -import { collections } from "$lib/server/database"; -import { updateUser } from "./updateUser"; -import { ObjectId } from "mongodb"; -import { DEFAULT_SETTINGS } from "$lib/types/Settings"; -import { defaultModel } from "$lib/server/models"; - -const userData = { - preferred_username: "new-username", - name: "name", - picture: "https://example.com/avatar.png", - sub: "1234567890", -}; - -const locals = { - userId: "1234567890", - sessionId: "1234567890", -}; - -// @ts-expect-error SvelteKit cookies dumb mock -const cookiesMock: Cookies = { - set: vi.fn(), -}; - -const insertRandomUser = async () => { - const res = await collections.users.insertOne({ - _id: new ObjectId(), - createdAt: new Date(), - updatedAt: new Date(), - username: "base-username", - name: userData.name, - avatarUrl: userData.picture, - hfUserId: userData.sub, - sessionId: locals.sessionId, - }); - - return res.insertedId; -}; - -const insertRandomConversations = async (count: number) => { - const res = await collections.conversations.insertMany( - new Array(count).fill(0).map(() => ({ - _id: new ObjectId(), - title: "random title", - messages: [], - model: defaultModel.id, - createdAt: new Date(), - updatedAt: new Date(), - sessionId: locals.sessionId, - })) - ); - - return res.insertedIds; -}; - -describe("login", () => { - it("should update user if existing", async () => { - await insertRandomUser(); - - await updateUser({ userData, locals, cookies: cookiesMock }); - - const existingUser = await collections.users.findOne({ hfUserId: userData.sub }); - - assert.equal(existingUser?.name, userData.name); - - expect(cookiesMock.set).toBeCalledTimes(1); - }); - - it("should migrate pre-existing conversations for new user", async () => { - const insertedId = await insertRandomUser(); - - await insertRandomConversations(2); - - await updateUser({ userData, locals, cookies: cookiesMock }); - - const conversationCount = await collections.conversations.countDocuments({ - userId: insertedId, - sessionId: { $exists: false }, - }); - - assert.equal(conversationCount, 2); - - await collections.conversations.deleteMany({ userId: insertedId }); - }); - - it("should create default settings for 
new user", async () => { - await updateUser({ userData, locals, cookies: cookiesMock }); - - const user = await collections.users.findOne({ sessionId: locals.sessionId }); - - assert.exists(user); - - const settings = await collections.settings.findOne({ userId: user?._id }); - - expect(settings).toMatchObject({ - userId: user?._id, - updatedAt: expect.any(Date), - createdAt: expect.any(Date), - ethicsModalAcceptedAt: expect.any(Date), - ...DEFAULT_SETTINGS, - }); - - await collections.settings.deleteOne({ userId: user?._id }); - }); - - it("should migrate pre-existing settings for pre-existing user", async () => { - const { insertedId } = await collections.settings.insertOne({ - sessionId: locals.sessionId, - ethicsModalAcceptedAt: new Date(), - updatedAt: new Date(), - createdAt: new Date(), - ...DEFAULT_SETTINGS, - shareConversationsWithModelAuthors: false, - }); - - await updateUser({ userData, locals, cookies: cookiesMock }); - - const settings = await collections.settings.findOne({ - _id: insertedId, - sessionId: { $exists: false }, - }); - - assert.exists(settings); - - const user = await collections.users.findOne({ hfUserId: userData.sub }); - - expect(settings).toMatchObject({ - userId: user?._id, - updatedAt: expect.any(Date), - createdAt: expect.any(Date), - ethicsModalAcceptedAt: expect.any(Date), - ...DEFAULT_SETTINGS, - shareConversationsWithModelAuthors: false, - }); - - await collections.settings.deleteOne({ userId: user?._id }); - }); -}); - -afterEach(async () => { - await collections.users.deleteMany({ hfUserId: userData.sub }); - vi.clearAllMocks(); -}); diff --git a/spaces/huggingface-projects/stable-diffusion-multiplayer/frontend/src/lib/liveblocks/useObject.ts b/spaces/huggingface-projects/stable-diffusion-multiplayer/frontend/src/lib/liveblocks/useObject.ts deleted file mode 100644 index 43e0c1a0faa2d5d558372aa828efdf933e0326d8..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/stable-diffusion-multiplayer/frontend/src/lib/liveblocks/useObject.ts +++ /dev/null @@ -1,44 +0,0 @@ -// @ts-nocheck -import { LiveObject } from "@liveblocks/client"; -import { useStorage } from "./useStorage"; -import { onDestroy } from "svelte"; -import type { Writable } from "svelte/store"; -import { writable } from "svelte/store"; -import { useRoom } from "./useRoom"; - -/** - * Works similarly to `liveblocks-react` useObject - * https://liveblocks.io/docs/api-reference/liveblocks-react#useObject - * - * The main difference is that it returns a Svelte store: - * const obj = useObject() - * $obj.set('name', 'Chris') - * console.log($obj.get('name')) - */ -export function useObject(name: string, initial?: any): Writable { - const room = useRoom(); - const rootStore = useStorage(); - const list = writable(); - let unsubscribe = () => {}; - - const unsubscribeRoot = rootStore.subscribe((root) => { - if (!root) { - return; - } - - if (!root.get(name)) { - root.set(name, new LiveObject(initial)); - } - - list.set(root.get(name)); - - unsubscribe(); - unsubscribe = room.subscribe(root.get(name) as LiveObject, (newObject) => { - list.set(newObject); - }); - }); - - onDestroy(unsubscribeRoot); - - return list; -} diff --git a/spaces/huggingface-projects/stable-diffusion-multiplayer/stablediffusion-infinity/PyPatchMatch/Makefile b/spaces/huggingface-projects/stable-diffusion-multiplayer/stablediffusion-infinity/PyPatchMatch/Makefile deleted file mode 100644 index 791465529833fe4f6cc2825fa633d06e2e843038..0000000000000000000000000000000000000000 --- 
a/spaces/huggingface-projects/stable-diffusion-multiplayer/stablediffusion-infinity/PyPatchMatch/Makefile +++ /dev/null @@ -1,54 +0,0 @@ -# -# Makefile -# Jiayuan Mao, 2019-01-09 13:59 -# - -SRC_DIR = csrc -INC_DIR = csrc -OBJ_DIR = build/obj -TARGET = libpatchmatch.so - -LIB_TARGET = $(TARGET) -INCLUDE_DIR = -I $(SRC_DIR) -I $(INC_DIR) - -CXX = $(ENVIRONMENT_OPTIONS) g++ -CXXFLAGS = -std=c++14 -CXXFLAGS += -Ofast -ffast-math -w -# CXXFLAGS += -g -CXXFLAGS += $(shell pkg-config --cflags opencv.pc) -fPIC -CXXFLAGS += $(INCLUDE_DIR) -LDFLAGS = $(shell pkg-config --cflags --libs opencv.pc) -shared -fPIC - - -CXXSOURCES = $(shell find $(SRC_DIR)/ -name "*.cpp") -OBJS = $(addprefix $(OBJ_DIR)/,$(CXXSOURCES:.cpp=.o)) -DEPFILES = $(OBJS:.o=.d) - -.PHONY: all clean rebuild test - -all: $(LIB_TARGET) - -$(OBJ_DIR)/%.o: %.cpp - @echo "[CC] $< ..." - @$(CXX) -c $< $(CXXFLAGS) -o $@ - -$(OBJ_DIR)/%.d: %.cpp - @mkdir -pv $(dir $@) - @echo "[dep] $< ..." - @$(CXX) $(INCLUDE_DIR) $(CXXFLAGS) -MM -MT "$(OBJ_DIR)/$(<:.cpp=.o) $(OBJ_DIR)/$(<:.cpp=.d)" "$<" > "$@" - -sinclude $(DEPFILES) - -$(LIB_TARGET): $(OBJS) - @echo "[link] $(LIB_TARGET) ..." - @$(CXX) $(OBJS) -o $@ $(CXXFLAGS) $(LDFLAGS) - -clean: - rm -rf $(OBJ_DIR) $(LIB_TARGET) - -rebuild: - +@make clean - +@make - -# vim:ft=make -# diff --git a/spaces/huggingface-projects/stable-diffusion-multiplayer/static/_app/immutable/assets/_page-ff237a95.css b/spaces/huggingface-projects/stable-diffusion-multiplayer/static/_app/immutable/assets/_page-ff237a95.css deleted file mode 100644 index 603098fff1114044b5f884f8179dda966b5a20fb..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/stable-diffusion-multiplayer/static/_app/immutable/assets/_page-ff237a95.css +++ /dev/null @@ -1 +0,0 @@ -.cursor.svelte-14ka0w3{pointer-events:none;position:absolute;top:0px;left:0px;display:grid;touch-action:none;grid-template-columns:repeat(3,minmax(0,1fr));transform-origin:0 0}.frame.svelte-1wvt719{position:relative;display:grid;grid-template-columns:repeat(3,minmax(0,1fr));grid-template-rows:repeat(3,minmax(0,1fr));--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(8px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000);--tw-ring-opacity:1;--tw-ring-color:rgb(29 78 216 / var(--tw-ring-opacity)) }canvas.svelte-6dcxg7{transform-origin:0 0}.grid-row.svelte-tz2nn1.svelte-tz2nn1{display:grid;grid-template-columns:.5fr 2fr 1fr .5fr 2fr;align-items:center;justify-items:flex-start}.grid-row.svelte-tz2nn1 span.svelte-tz2nn1{white-space:nowrap} diff --git a/spaces/hylee/apdrawing/APDrawingGAN2/preprocess/readme.md b/spaces/hylee/apdrawing/APDrawingGAN2/preprocess/readme.md deleted file mode 100644 index ac9fa43b1f1a372f8a1052102c32158ecc18d975..0000000000000000000000000000000000000000 --- a/spaces/hylee/apdrawing/APDrawingGAN2/preprocess/readme.md +++ /dev/null @@ -1,71 +0,0 @@ -## Preprocessing steps - -Both training and testing images need: - -- align to 512x512 -- facial landmarks -- mask for eyes,nose,mouth,background - -Training images additionally need: - -- mask for face region - - -### 1. Align, resize, crop images to 512x512, and get facial landmarks - -All training and testing images in our model are aligned using facial landmarks. And landmarks after alignment are needed in our code. 
- -- First, 5 facial landmarks for a face photo need to be detected (we detect them using [MTCNN](https://github.com/kpzhang93/MTCNN_face_detection_alignment) (MTCNNv1)). - -- Then, we provide a MATLAB function in `face_align_512.m` to align, resize and crop face photos (and corresponding drawings) to 512x512. Call this function in MATLAB to align the images. -For example, for `img_1701.jpg` in the `example` dir, the 5 detected facial landmarks are saved in `example/img_1701_facial5point.mat`. Call the following in MATLAB: -```matlab -load('example/img_1701_facial5point.mat'); -[trans_img,trans_facial5point]=face_align_512('example/img_1701.jpg',facial5point,'example'); -``` - -This will align the image and output the aligned image plus the transformed facial landmarks (in txt format) in the `example` folder. -See `face_align_512.m` for more instructions. - -The saved transformed facial landmarks need to be copied to `dataset/landmark/` and must have the **same filename** as the aligned face photos (e.g. `dataset/data/test_single/31.png` should have landmark file `dataset/landmark/31.txt`). - -### 2. Prepare background masks - -In our work, background masks are segmented by the method in -"Automatic Portrait Segmentation for Image Stylization" -Xiaoyong Shen, Aaron Hertzmann, Jiaya Jia, Sylvain Paris, Brian Price, Eli Shechtman, Ian Sachs. Computer Graphics Forum, 35(2) (Proc. Eurographics), 2016. - -We use the code at http://xiaoyongshen.me/webpage_portrait/index.html to detect background masks for aligned face photos. -An example background mask is shown in `example/img_1701_aligned_bgmask.png`. - -The background masks need to be copied to `dataset/mask/bg/` and must have the **same filename** as the aligned face photos (e.g. `dataset/data/test_single/31.png` should have background mask `dataset/mask/bg/31.png`). - -### 3. Prepare eyes/nose/mouth masks - -We use dlib to extract 68 landmarks for aligned face photos, and use these landmarks to get masks for the local regions. -See an example in `get_partmask.py`; the eyes, nose and mouth masks for `example/img_1701_aligned.png` are `example/img_1701_aligned_[part]mask.png`, where part is in [eyel,eyer,nose,mouth]. - -The part masks need to be copied to `dataset/mask/[part]/` and must have the **same filename** as the aligned face photos. - -### 4. (For training) Prepare face masks - -We use the face parsing net in https://github.com/cientgu/Mask_Guided_Portrait_Editing to detect the face region. -The face parsing net labels each face into 11 classes: class 0 is the background, class 10 is hair, and classes 1~9 are face regions. -An example face mask is shown in `example/img_1701_aligned_facemask.png`. - -The face masks need to be copied to `dataset/mask/face/` and must have the **same filename** as the aligned face photos. - -### 5. (For training) Combine A and B - -We provide a Python script to generate training data in the form of pairs of images {A,B}, i.e. pairs {face photo, drawing}. This script concatenates each pair of images horizontally into one single image (see the sketch below). Then we can learn to translate A to B: - -Create a folder `/path/to/data` with subfolders `A` and `B`. `A` and `B` should each have their own subfolders `train`, `test`, etc. In `/path/to/data/A/train`, put the training face photos. In `/path/to/data/B/train`, put the corresponding artist drawings. Repeat the same for `test`. - -Corresponding images in a pair {A,B} must both be aligned images of size 512x512 and have the same filename, e.g., `/path/to/data/A/train/1.png` is considered to correspond to `/path/to/data/B/train/1.png`.
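
For reference, the horizontal concatenation the script performs is essentially the following. This is only a minimal sketch, assuming Pillow is installed; the folder paths are placeholders, and `datasets/combine_A_and_B.py` (invoked below) remains the script you should actually run:

```python
import os

from PIL import Image

fold_A = "/path/to/data/A/train"   # placeholder: aligned 512x512 face photos
fold_B = "/path/to/data/B/train"   # placeholder: corresponding 512x512 drawings
fold_AB = "/path/to/data/train"    # placeholder: output folder for concatenated pairs
os.makedirs(fold_AB, exist_ok=True)

for name in sorted(os.listdir(fold_A)):
    path_B = os.path.join(fold_B, name)  # a pair shares the same filename
    if not os.path.isfile(path_B):
        continue
    img_A = Image.open(os.path.join(fold_A, name)).convert("RGB")
    img_B = Image.open(path_B).convert("RGB")
    # paste A on the left half and B on the right half of a 1024x512 canvas
    pair = Image.new("RGB", (img_A.width + img_B.width, img_A.height))
    pair.paste(img_A, (0, 0))
    pair.paste(img_B, (img_A.width, 0))
    pair.save(os.path.join(fold_AB, name))
```
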
- -Once the data is formatted this way, call: -```bash -python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data -``` - -This will combine each pair of images (A,B) into a single image file, ready for training. \ No newline at end of file diff --git a/spaces/hysts/BLIP2-with-transformers/README.md b/spaces/hysts/BLIP2-with-transformers/README.md deleted file mode 100644 index bad73d8a2197eb1bed1a0ce27e2158b1287bff13..0000000000000000000000000000000000000000 --- a/spaces/hysts/BLIP2-with-transformers/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: BLIP2 with transformers -emoji: 🌖 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.50.2 -python_version: 3.10.13 -app_file: app.py -pinned: false -license: bsd-3-clause -duplicated_from: Salesforce/BLIP2 -suggested_hardware: a10g-small ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hysts/ControlNet-v1-1/depth_estimator.py b/spaces/hysts/ControlNet-v1-1/depth_estimator.py deleted file mode 100644 index 7c3cd1f949f55ca59bec4dda721a9af5d9e36d8f..0000000000000000000000000000000000000000 --- a/spaces/hysts/ControlNet-v1-1/depth_estimator.py +++ /dev/null @@ -1,25 +0,0 @@ -import numpy as np -import PIL.Image -from controlnet_aux.util import HWC3 -from transformers import pipeline - -from cv_utils import resize_image - - -class DepthEstimator: - def __init__(self): - self.model = pipeline("depth-estimation") - - def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image: - detect_resolution = kwargs.pop("detect_resolution", 512) - image_resolution = kwargs.pop("image_resolution", 512) - image = np.array(image) - image = HWC3(image) - image = resize_image(image, resolution=detect_resolution) - image = PIL.Image.fromarray(image) - image = self.model(image) - image = image["depth"] - image = np.array(image) - image = HWC3(image) - image = resize_image(image, resolution=image_resolution) - return PIL.Image.fromarray(image) diff --git a/spaces/hysts/ControlNet-with-Anything-v4/app.py b/spaces/hysts/ControlNet-with-Anything-v4/app.py deleted file mode 100644 index f3522901c917cb44c6ea9189bfef1520b15b48f9..0000000000000000000000000000000000000000 --- a/spaces/hysts/ControlNet-with-Anything-v4/app.py +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import os -import pathlib -import shlex -import subprocess - -import gradio as gr -import torch - -if os.getenv('SYSTEM') == 'spaces': - with open('patch') as f: - subprocess.run(shlex.split('patch -p1'), stdin=f, cwd='ControlNet') - -base_url = 'https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/' -names = [ - 'body_pose_model.pth', - 'dpt_hybrid-midas-501f0c75.pt', - 'hand_pose_model.pth', - 'mlsd_large_512_fp32.pth', - 'mlsd_tiny_512_fp32.pth', - 'network-bsds500.pth', - 'upernet_global_small.pth', -] -for name in names: - command = f'wget https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/{name} -O {name}' - out_path = pathlib.Path(f'ControlNet/annotator/ckpts/{name}') - if out_path.exists(): - continue - subprocess.run(shlex.split(command), cwd='ControlNet/annotator/ckpts/') - -from app_canny import create_demo as create_demo_canny -from app_depth import create_demo as create_demo_depth -from app_fake_scribble import create_demo as create_demo_fake_scribble -from app_hed import create_demo as create_demo_hed -from app_hough import create_demo as create_demo_hough 
-from app_normal import create_demo as create_demo_normal -from app_pose import create_demo as create_demo_pose -from app_scribble import create_demo as create_demo_scribble -from app_scribble_interactive import \ - create_demo as create_demo_scribble_interactive -from app_seg import create_demo as create_demo_seg -from model import Model, download_all_controlnet_weights - -DESCRIPTION = '# [ControlNet v1.0](https://github.com/lllyasviel/ControlNet) + [Anything-v4.0](https://huggingface.co/andite/anything-v4.0)' - -SPACE_ID = os.getenv('SPACE_ID') -ALLOW_CHANGING_BASE_MODEL = SPACE_ID != 'hysts/ControlNet-with-Anything-v4' - -if SPACE_ID is not None: - DESCRIPTION += f'\n

      For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. Duplicate Space

      ' -if not torch.cuda.is_available(): - DESCRIPTION += '\n

      Running on CPU 🥶 This demo does not work on CPU.

      ' - -if torch.cuda.is_available(): - if os.getenv('SYSTEM') == 'spaces': - download_all_controlnet_weights() - -MAX_IMAGES = int(os.getenv('MAX_IMAGES', '3')) -DEFAULT_NUM_IMAGES = min(MAX_IMAGES, int(os.getenv('DEFAULT_NUM_IMAGES', '1'))) - -DEFAULT_MODEL_ID = os.getenv('DEFAULT_MODEL_ID', 'andite/anything-v4.0') -model = Model(base_model_id=DEFAULT_MODEL_ID, task_name='canny') - -with gr.Blocks(css='style.css') as demo: - gr.Markdown(DESCRIPTION) - with gr.Tabs(): - with gr.TabItem('Canny'): - create_demo_canny(model.process_canny, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - with gr.TabItem('Hough'): - create_demo_hough(model.process_hough, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - with gr.TabItem('HED'): - create_demo_hed(model.process_hed, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - with gr.TabItem('Scribble'): - create_demo_scribble(model.process_scribble, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - with gr.TabItem('Scribble Interactive'): - create_demo_scribble_interactive( - model.process_scribble_interactive, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - with gr.TabItem('Fake Scribble'): - create_demo_fake_scribble(model.process_fake_scribble, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - with gr.TabItem('Pose'): - create_demo_pose(model.process_pose, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - with gr.TabItem('Segmentation'): - create_demo_seg(model.process_seg, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - with gr.TabItem('Depth'): - create_demo_depth(model.process_depth, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - with gr.TabItem('Normal map'): - create_demo_normal(model.process_normal, - max_images=MAX_IMAGES, - default_num_images=DEFAULT_NUM_IMAGES) - - with gr.Accordion(label='Base model', open=False): - with gr.Row(): - with gr.Column(): - current_base_model = gr.Text(label='Current base model') - with gr.Column(scale=0.3): - check_base_model_button = gr.Button('Check current base model') - with gr.Row(): - with gr.Column(): - new_base_model_id = gr.Text( - label='New base model', - max_lines=1, - placeholder='runwayml/stable-diffusion-v1-5', - info= - 'The base model must be compatible with Stable Diffusion v1.5.', - interactive=ALLOW_CHANGING_BASE_MODEL) - with gr.Column(scale=0.3): - change_base_model_button = gr.Button( - 'Change base model', interactive=ALLOW_CHANGING_BASE_MODEL) - if not ALLOW_CHANGING_BASE_MODEL: - gr.Markdown( - '''The base model is not allowed to be changed in this Space so as not to slow down the demo, but it can be changed if you duplicate the Space. 
Duplicate Space''' - ) - - check_base_model_button.click(fn=lambda: model.base_model_id, - outputs=current_base_model, - queue=False) - new_base_model_id.submit(fn=model.set_base_model, - inputs=new_base_model_id, - outputs=current_base_model) - change_base_model_button.click(fn=model.set_base_model, - inputs=new_base_model_id, - outputs=current_base_model) - -demo.queue(api_open=False, max_size=10).launch() diff --git a/spaces/hysts/ControlNet/app_hed.py b/spaces/hysts/ControlNet/app_hed.py deleted file mode 100644 index 2d6146f864f6bec127f72687855169307eb68cc9..0000000000000000000000000000000000000000 --- a/spaces/hysts/ControlNet/app_hed.py +++ /dev/null @@ -1,83 +0,0 @@ -# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_hed2image.py -# The original license file is LICENSE.ControlNet in this repo. -import gradio as gr - - -def create_demo(process, max_images=12, default_num_images=3): - with gr.Blocks() as demo: - with gr.Row(): - gr.Markdown('## Control Stable Diffusion with HED Maps') - with gr.Row(): - with gr.Column(): - input_image = gr.Image(source='upload', type='numpy') - prompt = gr.Textbox(label='Prompt') - run_button = gr.Button(label='Run') - with gr.Accordion('Advanced options', open=False): - num_samples = gr.Slider(label='Images', - minimum=1, - maximum=max_images, - value=default_num_images, - step=1) - image_resolution = gr.Slider(label='Image Resolution', - minimum=256, - maximum=512, - value=512, - step=256) - detect_resolution = gr.Slider(label='HED Resolution', - minimum=128, - maximum=512, - value=512, - step=1) - num_steps = gr.Slider(label='Steps', - minimum=1, - maximum=100, - value=20, - step=1) - guidance_scale = gr.Slider(label='Guidance Scale', - minimum=0.1, - maximum=30.0, - value=9.0, - step=0.1) - seed = gr.Slider(label='Seed', - minimum=-1, - maximum=2147483647, - step=1, - randomize=True) - a_prompt = gr.Textbox( - label='Added Prompt', - value='best quality, extremely detailed') - n_prompt = gr.Textbox( - label='Negative Prompt', - value= - 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality' - ) - with gr.Column(): - result = gr.Gallery(label='Output', - show_label=False, - elem_id='gallery').style(grid=2, - height='auto') - inputs = [ - input_image, - prompt, - a_prompt, - n_prompt, - num_samples, - image_resolution, - detect_resolution, - num_steps, - guidance_scale, - seed, - ] - prompt.submit(fn=process, inputs=inputs, outputs=result) - run_button.click(fn=process, - inputs=inputs, - outputs=result, - api_name='hed') - return demo - - -if __name__ == '__main__': - from model import Model - model = Model() - demo = create_demo(model.process_hed) - demo.queue().launch() diff --git a/spaces/hysts/MobileStyleGAN/README.md b/spaces/hysts/MobileStyleGAN/README.md deleted file mode 100644 index 2c029491fc81651b7c8dd06e1f9f65c2bbdad596..0000000000000000000000000000000000000000 --- a/spaces/hysts/MobileStyleGAN/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: MobileStyleGAN -emoji: 📚 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -suggested_hardware: t4-small ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference - -https://arxiv.org/abs/2104.04767 diff --git a/spaces/hysts/cv_diffusion_text-to-image-synthesis_tiny/style.css b/spaces/hysts/cv_diffusion_text-to-image-synthesis_tiny/style.css deleted file mode 100644 
index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000 --- a/spaces/hysts/cv_diffusion_text-to-image-synthesis_tiny/style.css +++ /dev/null @@ -1,3 +0,0 @@ -h1 { - text-align: center; -} diff --git a/spaces/hzy123/bingo/src/components/tailwind-indicator.tsx b/spaces/hzy123/bingo/src/components/tailwind-indicator.tsx deleted file mode 100644 index f2a1291213dd67055fcebe67fab574c8441338df..0000000000000000000000000000000000000000 --- a/spaces/hzy123/bingo/src/components/tailwind-indicator.tsx +++ /dev/null @@ -1,14 +0,0 @@ -export function TailwindIndicator() { - if (process.env.NODE_ENV === 'production') return null - - return ( -
    <div className="fixed bottom-1 left-1 z-50 flex h-6 w-6 items-center justify-center rounded-full bg-gray-800 p-3 font-mono text-xs text-white">
-      <div className="block sm:hidden">xs</div>
-      <div className="hidden sm:block md:hidden">sm</div>
-      <div className="hidden md:block lg:hidden">md</div>
-      <div className="hidden lg:block xl:hidden">lg</div>
-      <div className="hidden xl:block 2xl:hidden">xl</div>
-      <div className="hidden 2xl:block">2xl</div>
-    </div>
      - ) -} diff --git a/spaces/iamadhxxx/Analyse/README.md b/spaces/iamadhxxx/Analyse/README.md deleted file mode 100644 index d27da64ee10a91f97c36bd1eda73f2f1c6680f46..0000000000000000000000000000000000000000 --- a/spaces/iamadhxxx/Analyse/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Analyse -emoji: 🏆 -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.43.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/iamstolas/STOLAS/src/components/ui/input.tsx b/spaces/iamstolas/STOLAS/src/components/ui/input.tsx deleted file mode 100644 index 684a857f3d769b78818fb13de1abaebfb09ca79c..0000000000000000000000000000000000000000 --- a/spaces/iamstolas/STOLAS/src/components/ui/input.tsx +++ /dev/null @@ -1,25 +0,0 @@ -import * as React from 'react' - -import { cn } from '@/lib/utils' - -export interface InputProps - extends React.InputHTMLAttributes {} - -const Input = React.forwardRef( - ({ className, type, ...props }, ref) => { - return ( - - ) - } -) -Input.displayName = 'Input' - -export { Input } diff --git a/spaces/indifendi/baby1/Dockerfile b/spaces/indifendi/baby1/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/indifendi/baby1/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Assassin Creed 3 Rg Mechanics Update 102 [BETTER].md b/spaces/inplisQlawa/anything-midjourney-v4-1/Assassin Creed 3 Rg Mechanics Update 102 [BETTER].md deleted file mode 100644 index 1915f4cad7b8f6c50aabd982f725ef251a9809dc..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Assassin Creed 3 Rg Mechanics Update 102 [BETTER].md +++ /dev/null @@ -1,9 +0,0 @@ -
      -

      as usual, getting the most of the game involves side quests. you will most certainly be getting a lot of the aforementioned chests and other loot, but youre also likely to be carrying around a prize or two from all the various third party vendors youll come across.

      -

      Assassin Creed 3 Rg Mechanics Update 102


      Download Filehttps://urlin.us/2uEwoO



      -

      i must admit, i ve been one of those avid players trying to make the most of their time spent wandering the wastes with their hidden blades. now, i usually end up getting stopped before it dawns on me that i cant do anything but talk to the npcs. i should probably stop being the bad guy here, but i cant help it. its all worth it in the end.

      -

      besides consumables like health and stamina packs, theres also a variety of different health and status items for you to use in-game. the number of different versions of the items provided to the player will vary depending on which faction youre playing as. some of the better all around items include the first aid healing kit which basically does two things: heals you and gives you the ability to use your houdini skills at certain points along the way. the ice poisoning weapon allows you to poison your enemies by firing at them, doing major damage (until your stamina runs out) and making them immobile. a third item, the explosive bag is usually used to make enemies immobile, and, after exploding, its seeds will drop on the ground if you move a bit. this can then be picked up and used to plant harvests for you in the future. food and water will also be provided to the player, but arent really necessary, being able to scrounge for it elsewhere.

      -

      since youll be completing quests in egypt, ancient greece and rome, theres a new universal "enchanter" skill line . this allows you to build divine and physical power attributes with a variety of different items.

      -

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Breaking Bad Season S05 Complete 480p BluRay HDTV X264EncodeKingBreaking Bad Season S05 Complet !FULL!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Breaking Bad Season S05 Complete 480p BluRay HDTV X264EncodeKingBreaking Bad Season S05 Complet !FULL!.md deleted file mode 100644 index 0ddb14fe1c41f76ac36863bcc89ee7cb2a1c441a..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Breaking Bad Season S05 Complete 480p BluRay HDTV X264EncodeKingBreaking Bad Season S05 Complet !FULL!.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Breaking Bad Season S05 Complete 480p BluRay HDTV X264EncodeKingBreaking Bad Season S05 Complet


      Download File » https://urlin.us/2uEveP



      -
      -Breaking bad all seasons complete bluray 720p download. ... 95ec0d2f82 breaking bad s05 season 5 complete 720p hdtv x264 publichd picktorrent. ... Breaking bad s0104 s04e01 hdtv 720p download full tv. ... Breaking bad season s05 complete 480p bluray hdtv x264encodekingbreaking bad season s05 complete 48. 1fdad05405
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/Adobe Acrobat Pro Dc 2020 Crack Serial Number Latest.md b/spaces/inreVtussa/clothingai/Examples/Adobe Acrobat Pro Dc 2020 Crack Serial Number Latest.md deleted file mode 100644 index a179012af100bc71a42005fde68abc5b4eca31bb..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Adobe Acrobat Pro Dc 2020 Crack Serial Number Latest.md +++ /dev/null @@ -1,26 +0,0 @@ - -

      How to Download and Install Adobe Acrobat Pro DC 2020 Crack Serial Number Latest

      -

      Adobe Acrobat Pro DC 2020 is a powerful software that allows you to create, edit, convert, sign, and share PDF files. It also lets you access your PDF files from anywhere using the cloud services. If you want to download and install Adobe Acrobat Pro DC 2020 crack serial number for free, follow these steps:

      -
        -
      1. Download the setup file and the patch file from here [^1^].
      2. -
      3. Extract the files using a file extractor such as WinRAR or 7-Zip.
      4. -
      5. Run the setup file and follow the instructions to install Adobe Acrobat Pro DC 2020 on your computer.
      6. -
      7. After the installation is complete, do not launch the program yet.
      8. -
      9. Copy the patch file and paste it into the installation folder of Adobe Acrobat Pro DC 2020.
      10. -
      11. Run the patch file as administrator and click on the patch button.
      12. -
      13. Wait for the patching process to finish and close the patch file.
      14. -
      15. Launch Adobe Acrobat Pro DC 2020 and enjoy the full version for free.
      16. -
      -

      Note: You may need to disable your antivirus or firewall before running the patch file. Also, this method is only for educational purposes and we do not recommend using cracked software. You should buy the original software from here.

      -

      Adobe Acrobat Pro Dc 2020 Crack Serial Number {Latest}


      DOWNLOAD 🗸🗸🗸 https://tiurll.com/2uCkmW



      Adobe Acrobat Pro DC 2020 is the latest version of the popular PDF software that offers many new and improved features. Some of the features are:

      -
        -
      • PDF editing: You can edit text, images, links, and other elements in your PDF files with ease. You can also compare two versions of a PDF file and highlight the differences.
      • -
      • PDF conversion: You can convert PDF files to various formats such as Word, Excel, PowerPoint, HTML, and more. You can also create PDF files from any application that supports printing.
      • -
      • PDF signing: You can sign your PDF files digitally using certificates or cloud signatures. You can also request signatures from others and track their status.
      • -
      • PDF sharing: You can share your PDF files with anyone using email, cloud services, or online platforms. You can also collaborate with others on PDF files using comments and annotations.
      • -
      • PDF accessibility: You can make your PDF files more accessible for people with disabilities using tools such as tags, headings, alt text, and more. You can also check and fix accessibility issues in your PDF files.
      • -
      -

      Adobe Acrobat Pro DC 2020 is a comprehensive and versatile software that can help you work with PDF files in various ways. It is compatible with Windows and Mac operating systems and requires a subscription to use. You can download a free trial version from here.

      In this article, we have learned how to download and install Adobe Acrobat Pro DC 2020 crack serial number for free. We have also learned about some of the features of Adobe Acrobat Pro DC 2020 and how it can help us work with PDF files. However, we have also noted that using cracked software is illegal and risky. It may expose our computer to viruses, malware, or other threats. It may also violate the terms and conditions of Adobe and result in legal consequences. Therefore, we recommend that you buy the original software from Adobe and enjoy its benefits legally and safely.

      -

      Adobe Acrobat Pro DC 2020 is a powerful and versatile software that can help you create, edit, convert, sign, share, and access PDF files. It is compatible with Windows and Mac operating systems and requires a subscription to use. You can download a free trial version from here or buy the original software from here. We hope you found this article helpful and informative. Thank you for reading.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Crack DWG TrueView 2016 Download LINK.md b/spaces/inreVtussa/clothingai/Examples/Crack DWG TrueView 2016 Download LINK.md deleted file mode 100644 index 6d2ba129b7b58ed574900b6b7a8c4042320c2293..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Crack DWG TrueView 2016 Download LINK.md +++ /dev/null @@ -1,6 +0,0 @@ -

      crack DWG TrueView 2016 download


      Download 🆗 https://tiurll.com/2uCmgS



      - -Autodesk Inventor 2016; Autodesk Inventor 2018 Crack Free Download is the ... export pdf, Demo autodesk pid, Dwg viewer autodesk, Autodesk vault asp. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/ismot/1802t1/README.md b/spaces/ismot/1802t1/README.md deleted file mode 100644 index 87b7ebfb15f58162ce9bc1d164a5c6993a1f1dec..0000000000000000000000000000000000000000 --- a/spaces/ismot/1802t1/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Voice Conversion Yourtts -emoji: 😻 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: unknown -duplicated_from: ramkamal2000/voice-conversion-yourtts ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ivotai/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py b/spaces/ivotai/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py deleted file mode 100644 index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000 --- a/spaces/ivotai/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -import opencc - - -dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', - 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', - 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', - 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', - 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', - 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} - -converters = {} - -for dialect in dialects.values(): - try: - converters[dialect] = opencc.OpenCC(dialect) - except: - pass - - -def ngu_dialect_to_ipa(text, dialect): - dialect = dialects[dialect] - text = converters[dialect].convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/jackli888/stable-diffusion-webui/modules/scripts_auto_postprocessing.py b/spaces/jackli888/stable-diffusion-webui/modules/scripts_auto_postprocessing.py deleted file mode 100644 index 16ec8b613b134b0a9a4054f06d5979ec1822c422..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/modules/scripts_auto_postprocessing.py +++ /dev/null @@ -1,42 +0,0 @@ -from modules import scripts, scripts_postprocessing, shared - - -class ScriptPostprocessingForMainUI(scripts.Script): - def __init__(self, script_postproc): - self.script: scripts_postprocessing.ScriptPostprocessing = script_postproc - self.postprocessing_controls = None - - def title(self): - return self.script.name - - def show(self, is_img2img): - return scripts.AlwaysVisible - - def ui(self, is_img2img): - self.postprocessing_controls = self.script.ui() - return self.postprocessing_controls.values() - - def postprocess_image(self, p, script_pp, *args): - args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)} - - pp = scripts_postprocessing.PostprocessedImage(script_pp.image) - pp.info = {} - self.script.process(pp, **args_dict) - p.extra_generation_params.update(pp.info) - script_pp.image = pp.image - - -def create_auto_preprocessing_script_data(): - from modules import scripts - - res = [] - - for name in shared.opts.postprocessing_enable_in_main_ui: - script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None) - if script is None: - continue - - constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class()) - res.append(scripts.ScriptClassData(script_class=constructor, path=script.path, basedir=script.basedir, module=script.module)) - - return res diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_AES.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_AES.py deleted file mode 100644 index 116deec64821db74042b282a832e087b3673987f..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_AES.py +++ /dev/null @@ -1,1351 +0,0 @@ -# -*- coding: utf-8 -*- -# -# SelfTest/Cipher/AES.py: Self-test for the AES cipher -# -# Written in 2008 by Dwayne C. Litzenberger -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-# =================================================================== - -"""Self-test suite for Crypto.Cipher.AES""" - -from __future__ import print_function - -import unittest -from Crypto.Hash import SHA256 -from Crypto.Cipher import AES -from Crypto.Util.py3compat import * -from binascii import hexlify - -# This is a list of (plaintext, ciphertext, key[, description[, params]]) tuples. -test_data = [ - # FIPS PUB 197 test vectors - # http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf - - ('00112233445566778899aabbccddeeff', '69c4e0d86a7b0430d8cdb78070b4c55a', - '000102030405060708090a0b0c0d0e0f', 'FIPS 197 C.1 (AES-128)'), - - ('00112233445566778899aabbccddeeff', 'dda97ca4864cdfe06eaf70a0ec0d7191', - '000102030405060708090a0b0c0d0e0f1011121314151617', - 'FIPS 197 C.2 (AES-192)'), - - ('00112233445566778899aabbccddeeff', '8ea2b7ca516745bfeafc49904b496089', - '000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f', - 'FIPS 197 C.3 (AES-256)'), - - # Rijndael128 test vectors - # Downloaded 2008-09-13 from - # http://www.iaik.tugraz.at/Research/krypto/AES/old/~rijmen/rijndael/testvalues.tar.gz - - # ecb_tbl.txt, KEYSIZE=128 - ('506812a45f08c889b97f5980038b8359', 'd8f532538289ef7d06b506a4fd5be9c9', - '00010203050607080a0b0c0d0f101112', - 'ecb-tbl-128: I=1'), - ('5c6d71ca30de8b8b00549984d2ec7d4b', '59ab30f4d4ee6e4ff9907ef65b1fb68c', - '14151617191a1b1c1e1f202123242526', - 'ecb-tbl-128: I=2'), - ('53f3f4c64f8616e4e7c56199f48f21f6', 'bf1ed2fcb2af3fd41443b56d85025cb1', - '28292a2b2d2e2f30323334353738393a', - 'ecb-tbl-128: I=3'), - ('a1eb65a3487165fb0f1c27ff9959f703', '7316632d5c32233edcb0780560eae8b2', - '3c3d3e3f41424344464748494b4c4d4e', - 'ecb-tbl-128: I=4'), - ('3553ecf0b1739558b08e350a98a39bfa', '408c073e3e2538072b72625e68b8364b', - '50515253555657585a5b5c5d5f606162', - 'ecb-tbl-128: I=5'), - ('67429969490b9711ae2b01dc497afde8', 'e1f94dfa776597beaca262f2f6366fea', - '64656667696a6b6c6e6f707173747576', - 'ecb-tbl-128: I=6'), - ('93385c1f2aec8bed192f5a8e161dd508', 'f29e986c6a1c27d7b29ffd7ee92b75f1', - '78797a7b7d7e7f80828384858788898a', - 'ecb-tbl-128: I=7'), - ('b5bf946be19beb8db3983b5f4c6e8ddb', '131c886a57f8c2e713aba6955e2b55b5', - '8c8d8e8f91929394969798999b9c9d9e', - 'ecb-tbl-128: I=8'), - ('41321ee10e21bd907227c4450ff42324', 'd2ab7662df9b8c740210e5eeb61c199d', - 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2', - 'ecb-tbl-128: I=9'), - ('00a82f59c91c8486d12c0a80124f6089', '14c10554b2859c484cab5869bbe7c470', - 'b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', - 'ecb-tbl-128: I=10'), - ('7ce0fd076754691b4bbd9faf8a1372fe', 'db4d498f0a49cf55445d502c1f9ab3b5', - 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9da', - 'ecb-tbl-128: I=11'), - ('23605a8243d07764541bc5ad355b3129', '6d96fef7d66590a77a77bb2056667f7f', - 'dcdddedfe1e2e3e4e6e7e8e9ebecedee', - 'ecb-tbl-128: I=12'), - ('12a8cfa23ea764fd876232b4e842bc44', '316fb68edba736c53e78477bf913725c', - 'f0f1f2f3f5f6f7f8fafbfcfdfe010002', - 'ecb-tbl-128: I=13'), - ('bcaf32415e8308b3723e5fdd853ccc80', '6936f2b93af8397fd3a771fc011c8c37', - '04050607090a0b0c0e0f101113141516', - 'ecb-tbl-128: I=14'), - ('89afae685d801ad747ace91fc49adde0', 'f3f92f7a9c59179c1fcc2c2ba0b082cd', - '2c2d2e2f31323334363738393b3c3d3e', - 'ecb-tbl-128: I=15'), - ('f521d07b484357c4a69e76124a634216', '6a95ea659ee3889158e7a9152ff04ebc', - '40414243454647484a4b4c4d4f505152', - 'ecb-tbl-128: I=16'), - ('3e23b3bc065bcc152407e23896d77783', '1959338344e945670678a5d432c90b93', - '54555657595a5b5c5e5f606163646566', - 'ecb-tbl-128: I=17'), - ('79f0fba002be1744670e7e99290d8f52', 
'e49bddd2369b83ee66e6c75a1161b394', - '68696a6b6d6e6f70727374757778797a', - 'ecb-tbl-128: I=18'), - ('da23fe9d5bd63e1d72e3dafbe21a6c2a', 'd3388f19057ff704b70784164a74867d', - '7c7d7e7f81828384868788898b8c8d8e', - 'ecb-tbl-128: I=19'), - ('e3f5698ba90b6a022efd7db2c7e6c823', '23aa03e2d5e4cd24f3217e596480d1e1', - 'a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', - 'ecb-tbl-128: I=20'), - ('bdc2691d4f1b73d2700679c3bcbf9c6e', 'c84113d68b666ab2a50a8bdb222e91b9', - 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2', - 'ecb-tbl-128: I=21'), - ('ba74e02093217ee1ba1b42bd5624349a', 'ac02403981cd4340b507963db65cb7b6', - '08090a0b0d0e0f10121314151718191a', - 'ecb-tbl-128: I=22'), - ('b5c593b5851c57fbf8b3f57715e8f680', '8d1299236223359474011f6bf5088414', - '6c6d6e6f71727374767778797b7c7d7e', - 'ecb-tbl-128: I=23'), - ('3da9bd9cec072381788f9387c3bbf4ee', '5a1d6ab8605505f7977e55b9a54d9b90', - '80818283858687888a8b8c8d8f909192', - 'ecb-tbl-128: I=24'), - ('4197f3051121702ab65d316b3c637374', '72e9c2d519cf555e4208805aabe3b258', - '94959697999a9b9c9e9fa0a1a3a4a5a6', - 'ecb-tbl-128: I=25'), - ('9f46c62ec4f6ee3f6e8c62554bc48ab7', 'a8f3e81c4a23a39ef4d745dffe026e80', - 'a8a9aaabadaeafb0b2b3b4b5b7b8b9ba', - 'ecb-tbl-128: I=26'), - ('0220673fe9e699a4ebc8e0dbeb6979c8', '546f646449d31458f9eb4ef5483aee6c', - 'bcbdbebfc1c2c3c4c6c7c8c9cbcccdce', - 'ecb-tbl-128: I=27'), - ('b2b99171337ded9bc8c2c23ff6f18867', '4dbe4bc84ac797c0ee4efb7f1a07401c', - 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2', - 'ecb-tbl-128: I=28'), - ('a7facf4e301e984e5efeefd645b23505', '25e10bfb411bbd4d625ac8795c8ca3b3', - 'e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', - 'ecb-tbl-128: I=29'), - ('f7c762e4a9819160fd7acfb6c4eedcdd', '315637405054ec803614e43def177579', - 'f8f9fafbfdfefe00020304050708090a', - 'ecb-tbl-128: I=30'), - ('9b64fc21ea08709f4915436faa70f1be', '60c5bc8a1410247295c6386c59e572a8', - '0c0d0e0f11121314161718191b1c1d1e', - 'ecb-tbl-128: I=31'), - ('52af2c3de07ee6777f55a4abfc100b3f', '01366fc8ca52dfe055d6a00a76471ba6', - '20212223252627282a2b2c2d2f303132', - 'ecb-tbl-128: I=32'), - ('2fca001224386c57aa3f968cbe2c816f', 'ecc46595516ec612449c3f581e7d42ff', - '34353637393a3b3c3e3f404143444546', - 'ecb-tbl-128: I=33'), - ('4149c73658a4a9c564342755ee2c132f', '6b7ffe4c602a154b06ee9c7dab5331c9', - '48494a4b4d4e4f50525354555758595a', - 'ecb-tbl-128: I=34'), - ('af60005a00a1772f7c07a48a923c23d2', '7da234c14039a240dd02dd0fbf84eb67', - '5c5d5e5f61626364666768696b6c6d6e', - 'ecb-tbl-128: I=35'), - ('6fccbc28363759914b6f0280afaf20c6', 'c7dc217d9e3604ffe7e91f080ecd5a3a', - '70717273757677787a7b7c7d7f808182', - 'ecb-tbl-128: I=36'), - ('7d82a43ddf4fefa2fc5947499884d386', '37785901863f5c81260ea41e7580cda5', - '84858687898a8b8c8e8f909193949596', - 'ecb-tbl-128: I=37'), - ('5d5a990eaab9093afe4ce254dfa49ef9', 'a07b9338e92ed105e6ad720fccce9fe4', - '98999a9b9d9e9fa0a2a3a4a5a7a8a9aa', - 'ecb-tbl-128: I=38'), - ('4cd1e2fd3f4434b553aae453f0ed1a02', 'ae0fb9722418cc21a7da816bbc61322c', - 'acadaeafb1b2b3b4b6b7b8b9bbbcbdbe', - 'ecb-tbl-128: I=39'), - ('5a2c9a9641d4299125fa1b9363104b5e', 'c826a193080ff91ffb21f71d3373c877', - 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2', - 'ecb-tbl-128: I=40'), - ('b517fe34c0fa217d341740bfd4fe8dd4', '1181b11b0e494e8d8b0aa6b1d5ac2c48', - 'd4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', - 'ecb-tbl-128: I=41'), - ('014baf2278a69d331d5180103643e99a', '6743c3d1519ab4f2cd9a78ab09a511bd', - 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fa', - 'ecb-tbl-128: I=42'), - ('b529bd8164f20d0aa443d4932116841c', 'dc55c076d52bacdf2eefd952946a439d', - 'fcfdfeff01020304060708090b0c0d0e', - 'ecb-tbl-128: I=43'), - 
('2e596dcbb2f33d4216a1176d5bd1e456', '711b17b590ffc72b5c8e342b601e8003', - '10111213151617181a1b1c1d1f202122', - 'ecb-tbl-128: I=44'), - ('7274a1ea2b7ee2424e9a0e4673689143', '19983bb0950783a537e1339f4aa21c75', - '24252627292a2b2c2e2f303133343536', - 'ecb-tbl-128: I=45'), - ('ae20020bd4f13e9d90140bee3b5d26af', '3ba7762e15554169c0f4fa39164c410c', - '38393a3b3d3e3f40424344454748494a', - 'ecb-tbl-128: I=46'), - ('baac065da7ac26e855e79c8849d75a02', 'a0564c41245afca7af8aa2e0e588ea89', - '4c4d4e4f51525354565758595b5c5d5e', - 'ecb-tbl-128: I=47'), - ('7c917d8d1d45fab9e2540e28832540cc', '5e36a42a2e099f54ae85ecd92e2381ed', - '60616263656667686a6b6c6d6f707172', - 'ecb-tbl-128: I=48'), - ('bde6f89e16daadb0e847a2a614566a91', '770036f878cd0f6ca2268172f106f2fe', - '74757677797a7b7c7e7f808183848586', - 'ecb-tbl-128: I=49'), - ('c9de163725f1f5be44ebb1db51d07fbc', '7e4e03908b716116443ccf7c94e7c259', - '88898a8b8d8e8f90929394959798999a', - 'ecb-tbl-128: I=50'), - ('3af57a58f0c07dffa669572b521e2b92', '482735a48c30613a242dd494c7f9185d', - '9c9d9e9fa1a2a3a4a6a7a8a9abacadae', - 'ecb-tbl-128: I=51'), - ('3d5ebac306dde4604f1b4fbbbfcdae55', 'b4c0f6c9d4d7079addf9369fc081061d', - 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2', - 'ecb-tbl-128: I=52'), - ('c2dfa91bceb76a1183c995020ac0b556', 'd5810fe0509ac53edcd74f89962e6270', - 'c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', - 'ecb-tbl-128: I=53'), - ('c70f54305885e9a0746d01ec56c8596b', '03f17a16b3f91848269ecdd38ebb2165', - 'd8d9dadbdddedfe0e2e3e4e5e7e8e9ea', - 'ecb-tbl-128: I=54'), - ('c4f81b610e98012ce000182050c0c2b2', 'da1248c3180348bad4a93b4d9856c9df', - 'ecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', - 'ecb-tbl-128: I=55'), - ('eaab86b1d02a95d7404eff67489f97d4', '3d10d7b63f3452c06cdf6cce18be0c2c', - '00010203050607080a0b0c0d0f101112', - 'ecb-tbl-128: I=56'), - ('7c55bdb40b88870b52bec3738de82886', '4ab823e7477dfddc0e6789018fcb6258', - '14151617191a1b1c1e1f202123242526', - 'ecb-tbl-128: I=57'), - ('ba6eaa88371ff0a3bd875e3f2a975ce0', 'e6478ba56a77e70cfdaa5c843abde30e', - '28292a2b2d2e2f30323334353738393a', - 'ecb-tbl-128: I=58'), - ('08059130c4c24bd30cf0575e4e0373dc', '1673064895fbeaf7f09c5429ff75772d', - '3c3d3e3f41424344464748494b4c4d4e', - 'ecb-tbl-128: I=59'), - ('9a8eab004ef53093dfcf96f57e7eda82', '4488033ae9f2efd0ca9383bfca1a94e9', - '50515253555657585a5b5c5d5f606162', - 'ecb-tbl-128: I=60'), - ('0745b589e2400c25f117b1d796c28129', '978f3b8c8f9d6f46626cac3c0bcb9217', - '64656667696a6b6c6e6f707173747576', - 'ecb-tbl-128: I=61'), - ('2f1777781216cec3f044f134b1b92bbe', 'e08c8a7e582e15e5527f1d9e2eecb236', - '78797a7b7d7e7f80828384858788898a', - 'ecb-tbl-128: I=62'), - ('353a779ffc541b3a3805d90ce17580fc', 'cec155b76ac5ffda4cf4f9ca91e49a7a', - '8c8d8e8f91929394969798999b9c9d9e', - 'ecb-tbl-128: I=63'), - ('1a1eae4415cefcf08c4ac1c8f68bea8f', 'd5ac7165763225dd2a38cdc6862c29ad', - 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2', - 'ecb-tbl-128: I=64'), - ('e6e7e4e5b0b3b2b5d4d5aaab16111013', '03680fe19f7ce7275452020be70e8204', - 'b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', - 'ecb-tbl-128: I=65'), - ('f8f9fafbfbf8f9e677767170efe0e1e2', '461df740c9781c388e94bb861ceb54f6', - 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9da', - 'ecb-tbl-128: I=66'), - ('63626160a1a2a3a445444b4a75727370', '451bd60367f96483042742219786a074', - 'dcdddedfe1e2e3e4e6e7e8e9ebecedee', - 'ecb-tbl-128: I=67'), - ('717073720605040b2d2c2b2a05fafbf9', 'e4dfa42671a02e57ef173b85c0ea9f2b', - 'f0f1f2f3f5f6f7f8fafbfcfdfe010002', - 'ecb-tbl-128: I=68'), - ('78797a7beae9e8ef3736292891969794', 'ed11b89e76274282227d854700a78b9e', - '04050607090a0b0c0e0f101113141516', - 'ecb-tbl-128: 
I=69'), - ('838281803231300fdddcdbdaa0afaead', '433946eaa51ea47af33895f2b90b3b75', - '18191a1b1d1e1f20222324252728292a', - 'ecb-tbl-128: I=70'), - ('18191a1bbfbcbdba75747b7a7f78797a', '6bc6d616a5d7d0284a5910ab35022528', - '2c2d2e2f31323334363738393b3c3d3e', - 'ecb-tbl-128: I=71'), - ('848586879b989996a3a2a5a4849b9a99', 'd2a920ecfe919d354b5f49eae9719c98', - '40414243454647484a4b4c4d4f505152', - 'ecb-tbl-128: I=72'), - ('0001020322212027cacbf4f551565754', '3a061b17f6a92885efbd0676985b373d', - '54555657595a5b5c5e5f606163646566', - 'ecb-tbl-128: I=73'), - ('cecfcccdafacadb2515057564a454447', 'fadeec16e33ea2f4688499d157e20d8f', - '68696a6b6d6e6f70727374757778797a', - 'ecb-tbl-128: I=74'), - ('92939091cdcecfc813121d1c80878685', '5cdefede59601aa3c3cda36fa6b1fa13', - '7c7d7e7f81828384868788898b8c8d8e', - 'ecb-tbl-128: I=75'), - ('d2d3d0d16f6c6d6259585f5ed1eeefec', '9574b00039844d92ebba7ee8719265f8', - '90919293959697989a9b9c9d9fa0a1a2', - 'ecb-tbl-128: I=76'), - ('acadaeaf878485820f0e1110d5d2d3d0', '9a9cf33758671787e5006928188643fa', - 'a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', - 'ecb-tbl-128: I=77'), - ('9091929364676619e6e7e0e1757a7b78', '2cddd634c846ba66bb46cbfea4a674f9', - 'b8b9babbbdbebfc0c2c3c4c5c7c8c9ca', - 'ecb-tbl-128: I=78'), - ('babbb8b98a89888f74757a7b92959497', 'd28bae029393c3e7e26e9fafbbb4b98f', - 'cccdcecfd1d2d3d4d6d7d8d9dbdcddde', - 'ecb-tbl-128: I=79'), - ('8d8c8f8e6e6d6c633b3a3d3ccad5d4d7', 'ec27529b1bee0a9ab6a0d73ebc82e9b7', - 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2', - 'ecb-tbl-128: I=80'), - ('86878485010203040808f7f767606162', '3cb25c09472aff6ee7e2b47ccd7ccb17', - 'f4f5f6f7f9fafbfcfefe010103040506', - 'ecb-tbl-128: I=81'), - ('8e8f8c8d656667788a8b8c8d010e0f0c', 'dee33103a7283370d725e44ca38f8fe5', - '08090a0b0d0e0f10121314151718191a', - 'ecb-tbl-128: I=82'), - ('c8c9cacb858687807a7b7475e7e0e1e2', '27f9bcd1aac64bffc11e7815702c1a69', - '1c1d1e1f21222324262728292b2c2d2e', - 'ecb-tbl-128: I=83'), - ('6d6c6f6e5053525d8c8d8a8badd2d3d0', '5df534ffad4ed0749a9988e9849d0021', - '30313233353637383a3b3c3d3f404142', - 'ecb-tbl-128: I=84'), - ('28292a2b393a3b3c0607181903040506', 'a48bee75db04fb60ca2b80f752a8421b', - '44454647494a4b4c4e4f505153545556', - 'ecb-tbl-128: I=85'), - ('a5a4a7a6b0b3b28ddbdadddcbdb2b3b0', '024c8cf70bc86ee5ce03678cb7af45f9', - '58595a5b5d5e5f60626364656768696a', - 'ecb-tbl-128: I=86'), - ('323330316467666130313e3f2c2b2a29', '3c19ac0f8a3a3862ce577831301e166b', - '6c6d6e6f71727374767778797b7c7d7e', - 'ecb-tbl-128: I=87'), - ('27262524080b0a05171611100b141516', 'c5e355b796a57421d59ca6be82e73bca', - '80818283858687888a8b8c8d8f909192', - 'ecb-tbl-128: I=88'), - ('040506074142434435340b0aa3a4a5a6', 'd94033276417abfb05a69d15b6e386e2', - '94959697999a9b9c9e9fa0a1a3a4a5a6', - 'ecb-tbl-128: I=89'), - ('242526271112130c61606766bdb2b3b0', '24b36559ea3a9b9b958fe6da3e5b8d85', - 'a8a9aaabadaeafb0b2b3b4b5b7b8b9ba', - 'ecb-tbl-128: I=90'), - ('4b4a4948252627209e9f9091cec9c8cb', '20fd4feaa0e8bf0cce7861d74ef4cb72', - 'bcbdbebfc1c2c3c4c6c7c8c9cbcccdce', - 'ecb-tbl-128: I=91'), - ('68696a6b6665646b9f9e9998d9e6e7e4', '350e20d5174277b9ec314c501570a11d', - 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2', - 'ecb-tbl-128: I=92'), - ('34353637c5c6c7c0f0f1eeef7c7b7a79', '87a29d61b7c604d238fe73045a7efd57', - 'e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', - 'ecb-tbl-128: I=93'), - ('32333031c2c1c13f0d0c0b0a050a0b08', '2c3164c1cc7d0064816bdc0faa362c52', - 'f8f9fafbfdfefe00020304050708090a', - 'ecb-tbl-128: I=94'), - ('cdcccfcebebdbcbbabaaa5a4181f1e1d', '195fe5e8a05a2ed594f6e4400eee10b3', - '0c0d0e0f11121314161718191b1c1d1e', - 
'ecb-tbl-128: I=95'), - ('212023223635343ba0a1a6a7445b5a59', 'e4663df19b9a21a5a284c2bd7f905025', - '20212223252627282a2b2c2d2f303132', - 'ecb-tbl-128: I=96'), - ('0e0f0c0da8abaaad2f2e515002050407', '21b88714cfb4e2a933bd281a2c4743fd', - '34353637393a3b3c3e3f404143444546', - 'ecb-tbl-128: I=97'), - ('070605042a2928378e8f8889bdb2b3b0', 'cbfc3980d704fd0fc54378ab84e17870', - '48494a4b4d4e4f50525354555758595a', - 'ecb-tbl-128: I=98'), - ('cbcac9c893909196a9a8a7a6a5a2a3a0', 'bc5144baa48bdeb8b63e22e03da418ef', - '5c5d5e5f61626364666768696b6c6d6e', - 'ecb-tbl-128: I=99'), - ('80818283c1c2c3cc9c9d9a9b0cf3f2f1', '5a1dbaef1ee2984b8395da3bdffa3ccc', - '70717273757677787a7b7c7d7f808182', - 'ecb-tbl-128: I=100'), - ('1213101125262720fafbe4e5b1b6b7b4', 'f0b11cd0729dfcc80cec903d97159574', - '84858687898a8b8c8e8f909193949596', - 'ecb-tbl-128: I=101'), - ('7f7e7d7c3033320d97969190222d2c2f', '9f95314acfddc6d1914b7f19a9cc8209', - '98999a9b9d9e9fa0a2a3a4a5a7a8a9aa', - 'ecb-tbl-128: I=102'), - ('4e4f4c4d484b4a4d81808f8e53545556', '595736f6f0f70914a94e9e007f022519', - 'acadaeafb1b2b3b4b6b7b8b9bbbcbdbe', - 'ecb-tbl-128: I=103'), - ('dcdddedfb0b3b2bd15141312a1bebfbc', '1f19f57892cae586fcdfb4c694deb183', - 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2', - 'ecb-tbl-128: I=104'), - ('93929190282b2a2dc4c5fafb92959497', '540700ee1f6f3dab0b3eddf6caee1ef5', - 'd4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', - 'ecb-tbl-128: I=105'), - ('f5f4f7f6c4c7c6d9373631307e717073', '14a342a91019a331687a2254e6626ca2', - 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fa', - 'ecb-tbl-128: I=106'), - ('93929190b6b5b4b364656a6b05020300', '7b25f3c3b2eea18d743ef283140f29ff', - 'fcfdfeff01020304060708090b0c0d0e', - 'ecb-tbl-128: I=107'), - ('babbb8b90d0e0f00a4a5a2a3043b3a39', '46c2587d66e5e6fa7f7ca6411ad28047', - '10111213151617181a1b1c1d1f202122', - 'ecb-tbl-128: I=108'), - ('d8d9dadb7f7c7d7a10110e0f787f7e7d', '09470e72229d954ed5ee73886dfeeba9', - '24252627292a2b2c2e2f303133343536', - 'ecb-tbl-128: I=109'), - ('fefffcfdefeced923b3a3d3c6768696a', 'd77c03de92d4d0d79ef8d4824ef365eb', - '38393a3b3d3e3f40424344454748494a', - 'ecb-tbl-128: I=110'), - ('d6d7d4d58a89888f96979899a5a2a3a0', '1d190219f290e0f1715d152d41a23593', - '4c4d4e4f51525354565758595b5c5d5e', - 'ecb-tbl-128: I=111'), - ('18191a1ba8abaaa5303136379b848586', 'a2cd332ce3a0818769616292e87f757b', - '60616263656667686a6b6c6d6f707172', - 'ecb-tbl-128: I=112'), - ('6b6a6968a4a7a6a1d6d72829b0b7b6b5', 'd54afa6ce60fbf9341a3690e21385102', - '74757677797a7b7c7e7f808183848586', - 'ecb-tbl-128: I=113'), - ('000102038a89889755545352a6a9a8ab', '06e5c364ded628a3f5e05e613e356f46', - '88898a8b8d8e8f90929394959798999a', - 'ecb-tbl-128: I=114'), - ('2d2c2f2eb3b0b1b6b6b7b8b9f2f5f4f7', 'eae63c0e62556dac85d221099896355a', - '9c9d9e9fa1a2a3a4a6a7a8a9abacadae', - 'ecb-tbl-128: I=115'), - ('979695943536373856575051e09f9e9d', '1fed060e2c6fc93ee764403a889985a2', - 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2', - 'ecb-tbl-128: I=116'), - ('a4a5a6a7989b9a9db1b0afae7a7d7c7f', 'c25235c1a30fdec1c7cb5c5737b2a588', - 'c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', - 'ecb-tbl-128: I=117'), - ('c1c0c3c2686b6a55a8a9aeafeae5e4e7', '796dbef95147d4d30873ad8b7b92efc0', - 'd8d9dadbdddedfe0e2e3e4e5e7e8e9ea', - 'ecb-tbl-128: I=118'), - ('c1c0c3c2141716118c8d828364636261', 'cbcf0fb34d98d0bd5c22ce37211a46bf', - 'ecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', - 'ecb-tbl-128: I=119'), - ('93929190cccfcec196979091e0fffefd', '94b44da6466126cafa7c7fd09063fc24', - '00010203050607080a0b0c0d0f101112', - 'ecb-tbl-128: I=120'), - ('b4b5b6b7f9fafbfc25241b1a6e69686b', 'd78c5b5ebf9b4dbda6ae506c5074c8fe', - 
'14151617191a1b1c1e1f202123242526', - 'ecb-tbl-128: I=121'), - ('868784850704051ac7c6c1c08788898a', '6c27444c27204b043812cf8cf95f9769', - '28292a2b2d2e2f30323334353738393a', - 'ecb-tbl-128: I=122'), - ('f4f5f6f7aaa9a8affdfcf3f277707172', 'be94524ee5a2aa50bba8b75f4c0aebcf', - '3c3d3e3f41424344464748494b4c4d4e', - 'ecb-tbl-128: I=123'), - ('d3d2d1d00605040bc3c2c5c43e010003', 'a0aeaae91ba9f31f51aeb3588cf3a39e', - '50515253555657585a5b5c5d5f606162', - 'ecb-tbl-128: I=124'), - ('73727170424140476a6b74750d0a0b08', '275297779c28266ef9fe4c6a13c08488', - '64656667696a6b6c6e6f707173747576', - 'ecb-tbl-128: I=125'), - ('c2c3c0c10a0908f754555253a1aeafac', '86523d92bb8672cb01cf4a77fd725882', - '78797a7b7d7e7f80828384858788898a', - 'ecb-tbl-128: I=126'), - ('6d6c6f6ef8fbfafd82838c8df8fffefd', '4b8327640e9f33322a04dd96fcbf9a36', - '8c8d8e8f91929394969798999b9c9d9e', - 'ecb-tbl-128: I=127'), - ('f5f4f7f684878689a6a7a0a1d2cdcccf', 'ce52af650d088ca559425223f4d32694', - 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2', - 'ecb-tbl-128: I=128'), - - # ecb_tbl.txt, KEYSIZE=192 - ('2d33eef2c0430a8a9ebf45e809c40bb6', 'dff4945e0336df4c1c56bc700eff837f', - '00010203050607080a0b0c0d0f10111214151617191a1b1c', - 'ecb-tbl-192: I=1'), - ('6aa375d1fa155a61fb72353e0a5a8756', 'b6fddef4752765e347d5d2dc196d1252', - '1e1f20212324252628292a2b2d2e2f30323334353738393a', - 'ecb-tbl-192: I=2'), - ('bc3736518b9490dcb8ed60eb26758ed4', 'd23684e3d963b3afcf1a114aca90cbd6', - '3c3d3e3f41424344464748494b4c4d4e5051525355565758', - 'ecb-tbl-192: I=3'), - ('aa214402b46cffb9f761ec11263a311e', '3a7ac027753e2a18c2ceab9e17c11fd0', - '5a5b5c5d5f60616264656667696a6b6c6e6f707173747576', - 'ecb-tbl-192: I=4'), - ('02aea86e572eeab66b2c3af5e9a46fd6', '8f6786bd007528ba26603c1601cdd0d8', - '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394', - 'ecb-tbl-192: I=5'), - ('e2aef6acc33b965c4fa1f91c75ff6f36', 'd17d073b01e71502e28b47ab551168b3', - '969798999b9c9d9ea0a1a2a3a5a6a7a8aaabacadafb0b1b2', - 'ecb-tbl-192: I=6'), - ('0659df46427162b9434865dd9499f91d', 'a469da517119fab95876f41d06d40ffa', - 'b4b5b6b7b9babbbcbebfc0c1c3c4c5c6c8c9cacbcdcecfd0', - 'ecb-tbl-192: I=7'), - ('49a44239c748feb456f59c276a5658df', '6091aa3b695c11f5c0b6ad26d3d862ff', - 'd2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', - 'ecb-tbl-192: I=8'), - ('66208f6e9d04525bdedb2733b6a6be37', '70f9e67f9f8df1294131662dc6e69364', - 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c', - 'ecb-tbl-192: I=9'), - ('3393f8dfc729c97f5480b950bc9666b0', 'd154dcafad8b207fa5cbc95e9996b559', - '0e0f10111314151618191a1b1d1e1f20222324252728292a', - 'ecb-tbl-192: I=10'), - ('606834c8ce063f3234cf1145325dbd71', '4934d541e8b46fa339c805a7aeb9e5da', - '2c2d2e2f31323334363738393b3c3d3e4041424345464748', - 'ecb-tbl-192: I=11'), - ('fec1c04f529bbd17d8cecfcc4718b17f', '62564c738f3efe186e1a127a0c4d3c61', - '4a4b4c4d4f50515254555657595a5b5c5e5f606163646566', - 'ecb-tbl-192: I=12'), - ('32df99b431ed5dc5acf8caf6dc6ce475', '07805aa043986eb23693e23bef8f3438', - '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384', - 'ecb-tbl-192: I=13'), - ('7fdc2b746f3f665296943b83710d1f82', 'df0b4931038bade848dee3b4b85aa44b', - '868788898b8c8d8e90919293959697989a9b9c9d9fa0a1a2', - 'ecb-tbl-192: I=14'), - ('8fba1510a3c5b87e2eaa3f7a91455ca2', '592d5fded76582e4143c65099309477c', - 'a4a5a6a7a9aaabacaeafb0b1b3b4b5b6b8b9babbbdbebfc0', - 'ecb-tbl-192: I=15'), - ('2c9b468b1c2eed92578d41b0716b223b', 'c9b8d6545580d3dfbcdd09b954ed4e92', - 'c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', - 'ecb-tbl-192: I=16'), - ('0a2bbf0efc6bc0034f8a03433fca1b1a', 
'5dccd5d6eb7c1b42acb008201df707a0', - 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfc', - 'ecb-tbl-192: I=17'), - ('25260e1f31f4104d387222e70632504b', 'a2a91682ffeb6ed1d34340946829e6f9', - 'fefe01010304050608090a0b0d0e0f10121314151718191a', - 'ecb-tbl-192: I=18'), - ('c527d25a49f08a5228d338642ae65137', 'e45d185b797000348d9267960a68435d', - '1c1d1e1f21222324262728292b2c2d2e3031323335363738', - 'ecb-tbl-192: I=19'), - ('3b49fc081432f5890d0e3d87e884a69e', '45e060dae5901cda8089e10d4f4c246b', - '3a3b3c3d3f40414244454647494a4b4c4e4f505153545556', - 'ecb-tbl-192: I=20'), - ('d173f9ed1e57597e166931df2754a083', 'f6951afacc0079a369c71fdcff45df50', - '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374', - 'ecb-tbl-192: I=21'), - ('8c2b7cafa5afe7f13562daeae1adede0', '9e95e00f351d5b3ac3d0e22e626ddad6', - '767778797b7c7d7e80818283858687888a8b8c8d8f909192', - 'ecb-tbl-192: I=22'), - ('aaf4ec8c1a815aeb826cab741339532c', '9cb566ff26d92dad083b51fdc18c173c', - '94959697999a9b9c9e9fa0a1a3a4a5a6a8a9aaabadaeafb0', - 'ecb-tbl-192: I=23'), - ('40be8c5d9108e663f38f1a2395279ecf', 'c9c82766176a9b228eb9a974a010b4fb', - 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebec', - 'ecb-tbl-192: I=24'), - ('0c8ad9bc32d43e04716753aa4cfbe351', 'd8e26aa02945881d5137f1c1e1386e88', - '2a2b2c2d2f30313234353637393a3b3c3e3f404143444546', - 'ecb-tbl-192: I=25'), - ('1407b1d5f87d63357c8dc7ebbaebbfee', 'c0e024ccd68ff5ffa4d139c355a77c55', - '48494a4b4d4e4f50525354555758595a5c5d5e5f61626364', - 'ecb-tbl-192: I=26'), - ('e62734d1ae3378c4549e939e6f123416', '0b18b3d16f491619da338640df391d43', - '84858687898a8b8c8e8f90919394959698999a9b9d9e9fa0', - 'ecb-tbl-192: I=27'), - ('5a752cff2a176db1a1de77f2d2cdee41', 'dbe09ac8f66027bf20cb6e434f252efc', - 'a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', - 'ecb-tbl-192: I=28'), - ('a9c8c3a4eabedc80c64730ddd018cd88', '6d04e5e43c5b9cbe05feb9606b6480fe', - 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdc', - 'ecb-tbl-192: I=29'), - ('ee9b3dbbdb86180072130834d305999a', 'dd1d6553b96be526d9fee0fbd7176866', - '1a1b1c1d1f20212224252627292a2b2c2e2f303133343536', - 'ecb-tbl-192: I=30'), - ('a7fa8c3586b8ebde7568ead6f634a879', '0260ca7e3f979fd015b0dd4690e16d2a', - '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354', - 'ecb-tbl-192: I=31'), - ('37e0f4a87f127d45ac936fe7ad88c10a', '9893734de10edcc8a67c3b110b8b8cc6', - '929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', - 'ecb-tbl-192: I=32'), - ('3f77d8b5d92bac148e4e46f697a535c5', '93b30b750516b2d18808d710c2ee84ef', - '464748494b4c4d4e50515253555657585a5b5c5d5f606162', - 'ecb-tbl-192: I=33'), - ('d25ebb686c40f7e2c4da1014936571ca', '16f65fa47be3cb5e6dfe7c6c37016c0e', - '828384858788898a8c8d8e8f91929394969798999b9c9d9e', - 'ecb-tbl-192: I=34'), - ('4f1c769d1e5b0552c7eca84dea26a549', 'f3847210d5391e2360608e5acb560581', - 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbc', - 'ecb-tbl-192: I=35'), - ('8548e2f882d7584d0fafc54372b6633a', '8754462cd223366d0753913e6af2643d', - 'bebfc0c1c3c4c5c6c8c9cacbcdcecfd0d2d3d4d5d7d8d9da', - 'ecb-tbl-192: I=36'), - ('87d7a336cb476f177cd2a51af2a62cdf', '1ea20617468d1b806a1fd58145462017', - 'dcdddedfe1e2e3e4e6e7e8e9ebecedeef0f1f2f3f5f6f7f8', - 'ecb-tbl-192: I=37'), - ('03b1feac668c4e485c1065dfc22b44ee', '3b155d927355d737c6be9dda60136e2e', - 'fafbfcfdfe01000204050607090a0b0c0e0f101113141516', - 'ecb-tbl-192: I=38'), - ('bda15e66819fa72d653a6866aa287962', '26144f7b66daa91b6333dbd3850502b3', - '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334', - 'ecb-tbl-192: I=39'), - ('4d0c7a0d2505b80bf8b62ceb12467f0a', 'e4f9a4ab52ced8134c649bf319ebcc90', 
- '363738393b3c3d3e40414243454647484a4b4c4d4f505152', - 'ecb-tbl-192: I=40'), - ('626d34c9429b37211330986466b94e5f', 'b9ddd29ac6128a6cab121e34a4c62b36', - '54555657595a5b5c5e5f60616364656668696a6b6d6e6f70', - 'ecb-tbl-192: I=41'), - ('333c3e6bf00656b088a17e5ff0e7f60a', '6fcddad898f2ce4eff51294f5eaaf5c9', - '727374757778797a7c7d7e7f81828384868788898b8c8d8e', - 'ecb-tbl-192: I=42'), - ('687ed0cdc0d2a2bc8c466d05ef9d2891', 'c9a6fe2bf4028080bea6f7fc417bd7e3', - '90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabac', - 'ecb-tbl-192: I=43'), - ('487830e78cc56c1693e64b2a6660c7b6', '6a2026846d8609d60f298a9c0673127f', - 'aeafb0b1b3b4b5b6b8b9babbbdbebfc0c2c3c4c5c7c8c9ca', - 'ecb-tbl-192: I=44'), - ('7a48d6b7b52b29392aa2072a32b66160', '2cb25c005e26efea44336c4c97a4240b', - 'cccdcecfd1d2d3d4d6d7d8d9dbdcdddee0e1e2e3e5e6e7e8', - 'ecb-tbl-192: I=45'), - ('907320e64c8c5314d10f8d7a11c8618d', '496967ab8680ddd73d09a0e4c7dcc8aa', - 'eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', - 'ecb-tbl-192: I=46'), - ('b561f2ca2d6e65a4a98341f3ed9ff533', 'd5af94de93487d1f3a8c577cb84a66a4', - '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324', - 'ecb-tbl-192: I=47'), - ('df769380d212792d026f049e2e3e48ef', '84bdac569cae2828705f267cc8376e90', - '262728292b2c2d2e30313233353637383a3b3c3d3f404142', - 'ecb-tbl-192: I=48'), - ('79f374bc445bdabf8fccb8843d6054c6', 'f7401dda5ad5ab712b7eb5d10c6f99b6', - '44454647494a4b4c4e4f50515354555658595a5b5d5e5f60', - 'ecb-tbl-192: I=49'), - ('4e02f1242fa56b05c68dbae8fe44c9d6', '1c9d54318539ebd4c3b5b7e37bf119f0', - '626364656768696a6c6d6e6f71727374767778797b7c7d7e', - 'ecb-tbl-192: I=50'), - ('cf73c93cbff57ac635a6f4ad2a4a1545', 'aca572d65fb2764cffd4a6eca090ea0d', - '80818283858687888a8b8c8d8f90919294959697999a9b9c', - 'ecb-tbl-192: I=51'), - ('9923548e2875750725b886566784c625', '36d9c627b8c2a886a10ccb36eae3dfbb', - '9e9fa0a1a3a4a5a6a8a9aaabadaeafb0b2b3b4b5b7b8b9ba', - 'ecb-tbl-192: I=52'), - ('4888336b723a022c9545320f836a4207', '010edbf5981e143a81d646e597a4a568', - 'bcbdbebfc1c2c3c4c6c7c8c9cbcccdced0d1d2d3d5d6d7d8', - 'ecb-tbl-192: I=53'), - ('f84d9a5561b0608b1160dee000c41ba8', '8db44d538dc20cc2f40f3067fd298e60', - 'dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', - 'ecb-tbl-192: I=54'), - ('c23192a0418e30a19b45ae3e3625bf22', '930eb53bc71e6ac4b82972bdcd5aafb3', - 'f8f9fafbfdfefe00020304050708090a0c0d0e0f11121314', - 'ecb-tbl-192: I=55'), - ('b84e0690b28b0025381ad82a15e501a7', '6c42a81edcbc9517ccd89c30c95597b4', - '161718191b1c1d1e20212223252627282a2b2c2d2f303132', - 'ecb-tbl-192: I=56'), - ('acef5e5c108876c4f06269f865b8f0b0', 'da389847ad06df19d76ee119c71e1dd3', - '34353637393a3b3c3e3f40414344454648494a4b4d4e4f50', - 'ecb-tbl-192: I=57'), - ('0f1b3603e0f5ddea4548246153a5e064', 'e018fdae13d3118f9a5d1a647a3f0462', - '525354555758595a5c5d5e5f61626364666768696b6c6d6e', - 'ecb-tbl-192: I=58'), - ('fbb63893450d42b58c6d88cd3c1809e3', '2aa65db36264239d3846180fabdfad20', - '70717273757677787a7b7c7d7f80818284858687898a8b8c', - 'ecb-tbl-192: I=59'), - ('4bef736df150259dae0c91354e8a5f92', '1472163e9a4f780f1ceb44b07ecf4fdb', - '8e8f90919394959698999a9b9d9e9fa0a2a3a4a5a7a8a9aa', - 'ecb-tbl-192: I=60'), - ('7d2d46242056ef13d3c3fc93c128f4c7', 'c8273fdc8f3a9f72e91097614b62397c', - 'acadaeafb1b2b3b4b6b7b8b9bbbcbdbec0c1c2c3c5c6c7c8', - 'ecb-tbl-192: I=61'), - ('e9c1ba2df415657a256edb33934680fd', '66c8427dcd733aaf7b3470cb7d976e3f', - 'cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', - 'ecb-tbl-192: I=62'), - ('e23ee277b0aa0a1dfb81f7527c3514f1', '146131cb17f1424d4f8da91e6f80c1d0', - 
'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304', - 'ecb-tbl-192: I=63'), - ('3e7445b0b63caaf75e4a911e12106b4c', '2610d0ad83659081ae085266a88770dc', - '060708090b0c0d0e10111213151617181a1b1c1d1f202122', - 'ecb-tbl-192: I=64'), - ('767774752023222544455a5be6e1e0e3', '38a2b5a974b0575c5d733917fb0d4570', - '24252627292a2b2c2e2f30313334353638393a3b3d3e3f40', - 'ecb-tbl-192: I=65'), - ('72737475717e7f7ce9e8ebea696a6b6c', 'e21d401ebc60de20d6c486e4f39a588b', - '424344454748494a4c4d4e4f51525354565758595b5c5d5e', - 'ecb-tbl-192: I=66'), - ('dfdedddc25262728c9c8cfcef1eeefec', 'e51d5f88c670b079c0ca1f0c2c4405a2', - '60616263656667686a6b6c6d6f70717274757677797a7b7c', - 'ecb-tbl-192: I=67'), - ('fffe0100707776755f5e5d5c7675746b', '246a94788a642fb3d1b823c8762380c8', - '7e7f80818384858688898a8b8d8e8f90929394959798999a', - 'ecb-tbl-192: I=68'), - ('e0e1e2e3424140479f9e9190292e2f2c', 'b80c391c5c41a4c3b30c68e0e3d7550f', - '9c9d9e9fa1a2a3a4a6a7a8a9abacadaeb0b1b2b3b5b6b7b8', - 'ecb-tbl-192: I=69'), - ('2120272690efeeed3b3a39384e4d4c4b', 'b77c4754fc64eb9a1154a9af0bb1f21c', - 'babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', - 'ecb-tbl-192: I=70'), - ('ecedeeef5350516ea1a0a7a6a3acadae', 'fb554de520d159a06bf219fc7f34a02f', - 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4', - 'ecb-tbl-192: I=71'), - ('32333c3d25222320e9e8ebeacecdccc3', 'a89fba152d76b4927beed160ddb76c57', - 'f6f7f8f9fbfcfdfe00010203050607080a0b0c0d0f101112', - 'ecb-tbl-192: I=72'), - ('40414243626160678a8bb4b511161714', '5676eab4a98d2e8473b3f3d46424247c', - '14151617191a1b1c1e1f20212324252628292a2b2d2e2f30', - 'ecb-tbl-192: I=73'), - ('94959293f5fafbf81f1e1d1c7c7f7e79', '4e8f068bd7ede52a639036ec86c33568', - '323334353738393a3c3d3e3f41424344464748494b4c4d4e', - 'ecb-tbl-192: I=74'), - ('bebfbcbd191a1b14cfcec9c8546b6a69', 'f0193c4d7aff1791ee4c07eb4a1824fc', - '50515253555657585a5b5c5d5f60616264656667696a6b6c', - 'ecb-tbl-192: I=75'), - ('2c2d3233898e8f8cbbbab9b8333031ce', 'ac8686eeca9ba761afe82d67b928c33f', - '6e6f70717374757678797a7b7d7e7f80828384858788898a', - 'ecb-tbl-192: I=76'), - ('84858687bfbcbdba37363938fdfafbf8', '5faf8573e33b145b6a369cd3606ab2c9', - '8c8d8e8f91929394969798999b9c9d9ea0a1a2a3a5a6a7a8', - 'ecb-tbl-192: I=77'), - ('828384857669686b909192930b08090e', '31587e9944ab1c16b844ecad0df2e7da', - 'aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', - 'ecb-tbl-192: I=78'), - ('bebfbcbd9695948b707176779e919093', 'd017fecd91148aba37f6f3068aa67d8a', - 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4', - 'ecb-tbl-192: I=79'), - ('8b8a85846067666521202322d0d3d2dd', '788ef2f021a73cba2794b616078a8500', - 'e6e7e8e9ebecedeef0f1f2f3f5f6f7f8fafbfcfdfe010002', - 'ecb-tbl-192: I=80'), - ('76777475f1f2f3f4f8f9e6e777707172', '5d1ef20dced6bcbc12131ac7c54788aa', - '04050607090a0b0c0e0f10111314151618191a1b1d1e1f20', - 'ecb-tbl-192: I=81'), - ('a4a5a2a34f404142b4b5b6b727242522', 'b3c8cf961faf9ea05fdde6d1e4d8f663', - '222324252728292a2c2d2e2f31323334363738393b3c3d3e', - 'ecb-tbl-192: I=82'), - ('94959697e1e2e3ec16171011839c9d9e', '143075c70605861c7fac6526199e459f', - '40414243454647484a4b4c4d4f50515254555657595a5b5c', - 'ecb-tbl-192: I=83'), - ('03023d3c06010003dedfdcddfffcfde2', 'a5ae12eade9a87268d898bfc8fc0252a', - '5e5f60616364656668696a6b6d6e6f70727374757778797a', - 'ecb-tbl-192: I=84'), - ('10111213f1f2f3f4cecfc0c1dbdcddde', '0924f7cf2e877a4819f5244a360dcea9', - '7c7d7e7f81828384868788898b8c8d8e9091929395969798', - 'ecb-tbl-192: I=85'), - ('67666160724d4c4f1d1c1f1e73707176', '3d9e9635afcc3e291cc7ab3f27d1c99a', - 
'9a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', - 'ecb-tbl-192: I=86'), - ('e6e7e4e5a8abaad584858283909f9e9d', '9d80feebf87510e2b8fb98bb54fd788c', - 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4', - 'ecb-tbl-192: I=87'), - ('71707f7e565150537d7c7f7e6162636c', '5f9d1a082a1a37985f174002eca01309', - 'd6d7d8d9dbdcdddee0e1e2e3e5e6e7e8eaebecedeff0f1f2', - 'ecb-tbl-192: I=88'), - ('64656667212223245555aaaa03040506', 'a390ebb1d1403930184a44b4876646e4', - 'f4f5f6f7f9fafbfcfefe01010304050608090a0b0d0e0f10', - 'ecb-tbl-192: I=89'), - ('9e9f9899aba4a5a6cfcecdcc2b28292e', '700fe918981c3195bb6c4bcb46b74e29', - '121314151718191a1c1d1e1f21222324262728292b2c2d2e', - 'ecb-tbl-192: I=90'), - ('c7c6c5c4d1d2d3dc626364653a454447', '907984406f7bf2d17fb1eb15b673d747', - '30313233353637383a3b3c3d3f40414244454647494a4b4c', - 'ecb-tbl-192: I=91'), - ('f6f7e8e9e0e7e6e51d1c1f1e5b585966', 'c32a956dcfc875c2ac7c7cc8b8cc26e1', - '4e4f50515354555658595a5b5d5e5f60626364656768696a', - 'ecb-tbl-192: I=92'), - ('bcbdbebf5d5e5f5868696667f4f3f2f1', '02646e2ebfa9b820cf8424e9b9b6eb51', - '6c6d6e6f71727374767778797b7c7d7e8081828385868788', - 'ecb-tbl-192: I=93'), - ('40414647b0afaead9b9a99989b98999e', '621fda3a5bbd54c6d3c685816bd4ead8', - '8a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', - 'ecb-tbl-192: I=94'), - ('69686b6a0201001f0f0e0908b4bbbab9', 'd4e216040426dfaf18b152469bc5ac2f', - 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4', - 'ecb-tbl-192: I=95'), - ('c7c6c9c8d8dfdedd5a5b5859bebdbcb3', '9d0635b9d33b6cdbd71f5d246ea17cc8', - 'c6c7c8c9cbcccdced0d1d2d3d5d6d7d8dadbdcdddfe0e1e2', - 'ecb-tbl-192: I=96'), - ('dedfdcdd787b7a7dfffee1e0b2b5b4b7', '10abad1bd9bae5448808765583a2cc1a', - 'e4e5e6e7e9eaebeceeeff0f1f3f4f5f6f8f9fafbfdfefe00', - 'ecb-tbl-192: I=97'), - ('4d4c4b4a606f6e6dd0d1d2d3fbf8f9fe', '6891889e16544e355ff65a793c39c9a8', - '020304050708090a0c0d0e0f11121314161718191b1c1d1e', - 'ecb-tbl-192: I=98'), - ('b7b6b5b4d7d4d5dae5e4e3e2e1fefffc', 'cc735582e68072c163cd9ddf46b91279', - '20212223252627282a2b2c2d2f30313234353637393a3b3c', - 'ecb-tbl-192: I=99'), - ('cecfb0b1f7f0f1f2aeafacad3e3d3c23', 'c5c68b9aeeb7f878df578efa562f9574', - '3e3f40414344454648494a4b4d4e4f50525354555758595a', - 'ecb-tbl-192: I=100'), - ('cacbc8c9cdcecfc812131c1d494e4f4c', '5f4764395a667a47d73452955d0d2ce8', - '5c5d5e5f61626364666768696b6c6d6e7071727375767778', - 'ecb-tbl-192: I=101'), - ('9d9c9b9ad22d2c2fb1b0b3b20c0f0e09', '701448331f66106cefddf1eb8267c357', - '7a7b7c7d7f80818284858687898a8b8c8e8f909193949596', - 'ecb-tbl-192: I=102'), - ('7a7b787964676659959493924f404142', 'cb3ee56d2e14b4e1941666f13379d657', - '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4', - 'ecb-tbl-192: I=103'), - ('aaaba4a5cec9c8cb1f1e1d1caba8a9a6', '9fe16efd18ab6e1981191851fedb0764', - 'b6b7b8b9bbbcbdbec0c1c2c3c5c6c7c8cacbcccdcfd0d1d2', - 'ecb-tbl-192: I=104'), - ('93929190282b2a2dc4c5fafb92959497', '3dc9ba24e1b223589b147adceb4c8e48', - 'd4d5d6d7d9dadbdcdedfe0e1e3e4e5e6e8e9eaebedeeeff0', - 'ecb-tbl-192: I=105'), - ('efeee9e8ded1d0d339383b3a888b8a8d', '1c333032682e7d4de5e5afc05c3e483c', - 'f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', - 'ecb-tbl-192: I=106'), - ('7f7e7d7ca2a1a0af78797e7f112e2f2c', 'd593cc99a95afef7e92038e05a59d00a', - '10111213151617181a1b1c1d1f20212224252627292a2b2c', - 'ecb-tbl-192: I=107'), - ('84859a9b2b2c2d2e868784852625245b', '51e7f96f53b4353923452c222134e1ec', - '2e2f30313334353638393a3b3d3e3f40424344454748494a', - 'ecb-tbl-192: I=108'), - ('b0b1b2b3070405026869666710171615', '4075b357a1a2b473400c3b25f32f81a4', - 
'4c4d4e4f51525354565758595b5c5d5e6061626365666768', - 'ecb-tbl-192: I=109'), - ('acadaaabbda2a3a00d0c0f0e595a5b5c', '302e341a3ebcd74f0d55f61714570284', - '6a6b6c6d6f70717274757677797a7b7c7e7f808183848586', - 'ecb-tbl-192: I=110'), - ('121310115655544b5253545569666764', '57abdd8231280da01c5042b78cf76522', - '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4', - 'ecb-tbl-192: I=111'), - ('dedfd0d166616063eaebe8e94142434c', '17f9ea7eea17ac1adf0e190fef799e92', - 'a6a7a8a9abacadaeb0b1b2b3b5b6b7b8babbbcbdbfc0c1c2', - 'ecb-tbl-192: I=112'), - ('dbdad9d81417161166677879e0e7e6e5', '2e1bdd563dd87ee5c338dd6d098d0a7a', - 'c4c5c6c7c9cacbcccecfd0d1d3d4d5d6d8d9dadbdddedfe0', - 'ecb-tbl-192: I=113'), - ('6a6b6c6de0efeeed2b2a2928c0c3c2c5', 'eb869996e6f8bfb2bfdd9e0c4504dbb2', - 'e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', - 'ecb-tbl-192: I=114'), - ('b1b0b3b21714151a1a1b1c1d5649484b', 'c2e01549e9decf317468b3e018c61ba8', - '00010203050607080a0b0c0d0f10111214151617191a1b1c', - 'ecb-tbl-192: I=115'), - ('39380706a3a4a5a6c4c5c6c77271706f', '8da875d033c01dd463b244a1770f4a22', - '1e1f20212324252628292a2b2d2e2f30323334353738393a', - 'ecb-tbl-192: I=116'), - ('5c5d5e5f1013121539383736e2e5e4e7', '8ba0dcf3a186844f026d022f8839d696', - '3c3d3e3f41424344464748494b4c4d4e5051525355565758', - 'ecb-tbl-192: I=117'), - ('43424544ead5d4d72e2f2c2d64676661', 'e9691ff9a6cc6970e51670a0fd5b88c1', - '5a5b5c5d5f60616264656667696a6b6c6e6f707173747576', - 'ecb-tbl-192: I=118'), - ('55545756989b9a65f8f9feff18171615', 'f2baec06faeed30f88ee63ba081a6e5b', - '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394', - 'ecb-tbl-192: I=119'), - ('05040b0a525554573c3d3e3f4a494847', '9c39d4c459ae5753394d6094adc21e78', - '969798999b9c9d9ea0a1a2a3a5a6a7a8aaabacadafb0b1b2', - 'ecb-tbl-192: I=120'), - ('14151617595a5b5c8584fbfa8e89888b', '6345b532a11904502ea43ba99c6bd2b2', - 'b4b5b6b7b9babbbcbebfc0c1c3c4c5c6c8c9cacbcdcecfd0', - 'ecb-tbl-192: I=121'), - ('7c7d7a7bfdf2f3f029282b2a51525354', '5ffae3061a95172e4070cedce1e428c8', - 'd2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', - 'ecb-tbl-192: I=122'), - ('38393a3b1e1d1c1341404746c23d3c3e', '0a4566be4cdf9adce5dec865b5ab34cd', - 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c', - 'ecb-tbl-192: I=123'), - ('8d8c939240474645818083827c7f7e41', 'ca17fcce79b7404f2559b22928f126fb', - '0e0f10111314151618191a1b1d1e1f20222324252728292a', - 'ecb-tbl-192: I=124'), - ('3b3a39381a19181f32333c3d45424340', '97ca39b849ed73a6470a97c821d82f58', - '2c2d2e2f31323334363738393b3c3d3e4041424345464748', - 'ecb-tbl-192: I=125'), - ('f0f1f6f738272625828380817f7c7d7a', '8198cb06bc684c6d3e9b7989428dcf7a', - '4a4b4c4d4f50515254555657595a5b5c5e5f606163646566', - 'ecb-tbl-192: I=126'), - ('89888b8a0407061966676061141b1a19', 'f53c464c705ee0f28d9a4c59374928bd', - '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384', - 'ecb-tbl-192: I=127'), - ('d3d2dddcaaadacaf9c9d9e9fe8ebeae5', '9adb3d4cca559bb98c3e2ed73dbf1154', - '868788898b8c8d8e90919293959697989a9b9c9d9fa0a1a2', - 'ecb-tbl-192: I=128'), - - # ecb_tbl.txt, KEYSIZE=256 - ('834eadfccac7e1b30664b1aba44815ab', '1946dabf6a03a2a2c3d0b05080aed6fc', - '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', - 'ecb-tbl-256: I=1'), - ('d9dc4dba3021b05d67c0518f72b62bf1', '5ed301d747d3cc715445ebdec62f2fb4', - '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', - 'ecb-tbl-256: I=2'), - ('a291d86301a4a739f7392173aa3c604c', '6585c8f43d13a6beab6419fc5935b9d0', - '50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', - 'ecb-tbl-256: I=3'), - 
('4264b2696498de4df79788a9f83e9390', '2a5b56a596680fcc0e05f5e0f151ecae', - '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394969798999b9c9d9e', - 'ecb-tbl-256: I=4'), - ('ee9932b3721804d5a83ef5949245b6f6', 'f5d6ff414fd2c6181494d20c37f2b8c4', - 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', - 'ecb-tbl-256: I=5'), - ('e6248f55c5fdcbca9cbbb01c88a2ea77', '85399c01f59fffb5204f19f8482f00b8', - 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', - 'ecb-tbl-256: I=6'), - ('b8358e41b9dff65fd461d55a99266247', '92097b4c88a041ddf98144bc8d22e8e7', - 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c0e0f101113141516', - 'ecb-tbl-256: I=7'), - ('f0e2d72260af58e21e015ab3a4c0d906', '89bd5b73b356ab412aef9f76cea2d65c', - '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334363738393b3c3d3e', - 'ecb-tbl-256: I=8'), - ('475b8b823ce8893db3c44a9f2a379ff7', '2536969093c55ff9454692f2fac2f530', - '40414243454647484a4b4c4d4f50515254555657595a5b5c5e5f606163646566', - 'ecb-tbl-256: I=9'), - ('688f5281945812862f5f3076cf80412f', '07fc76a872843f3f6e0081ee9396d637', - '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384868788898b8c8d8e', - 'ecb-tbl-256: I=10'), - ('08d1d2bc750af553365d35e75afaceaa', 'e38ba8ec2aa741358dcc93e8f141c491', - '90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', - 'ecb-tbl-256: I=11'), - ('8707121f47cc3efceca5f9a8474950a1', 'd028ee23e4a89075d0b03e868d7d3a42', - 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', - 'ecb-tbl-256: I=12'), - ('e51aa0b135dba566939c3b6359a980c5', '8cd9423dfc459e547155c5d1d522e540', - 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', - 'ecb-tbl-256: I=13'), - ('069a007fc76a459f98baf917fedf9521', '080e9517eb1677719acf728086040ae3', - '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324262728292b2c2d2e', - 'ecb-tbl-256: I=14'), - ('726165c1723fbcf6c026d7d00b091027', '7c1700211a3991fc0ecded0ab3e576b0', - '30313233353637383a3b3c3d3f40414244454647494a4b4c4e4f505153545556', - 'ecb-tbl-256: I=15'), - ('d7c544de91d55cfcde1f84ca382200ce', 'dabcbcc855839251db51e224fbe87435', - '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374767778797b7c7d7e', - 'ecb-tbl-256: I=16'), - ('fed3c9a161b9b5b2bd611b41dc9da357', '68d56fad0406947a4dd27a7448c10f1d', - '80818283858687888a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', - 'ecb-tbl-256: I=17'), - ('4f634cdc6551043409f30b635832cf82', 'da9a11479844d1ffee24bbf3719a9925', - 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4c6c7c8c9cbcccdce', - 'ecb-tbl-256: I=18'), - ('109ce98db0dfb36734d9f3394711b4e6', '5e4ba572f8d23e738da9b05ba24b8d81', - 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', - 'ecb-tbl-256: I=19'), - ('4ea6dfaba2d8a02ffdffa89835987242', 'a115a2065d667e3f0b883837a6e903f8', - '70717273757677787a7b7c7d7f80818284858687898a8b8c8e8f909193949596', - 'ecb-tbl-256: I=20'), - ('5ae094f54af58e6e3cdbf976dac6d9ef', '3e9e90dc33eac2437d86ad30b137e66e', - '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', - 'ecb-tbl-256: I=21'), - ('764d8e8e0f29926dbe5122e66354fdbe', '01ce82d8fbcdae824cb3c48e495c3692', - 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', - 'ecb-tbl-256: I=22'), - ('3f0418f888cdf29a982bf6b75410d6a9', '0c9cff163ce936faaf083cfd3dea3117', - 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', - 'ecb-tbl-256: I=23'), - ('e4a3e7cb12cdd56aa4a75197a9530220', '5131ba9bd48f2bba85560680df504b52', - '10111213151617181a1b1c1d1f20212224252627292a2b2c2e2f303133343536', - 'ecb-tbl-256: I=24'), - 
('211677684aac1ec1a160f44c4ebf3f26', '9dc503bbf09823aec8a977a5ad26ccb2', - '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354565758595b5c5d5e', - 'ecb-tbl-256: I=25'), - ('d21e439ff749ac8f18d6d4b105e03895', '9a6db0c0862e506a9e397225884041d7', - '60616263656667686a6b6c6d6f70717274757677797a7b7c7e7f808183848586', - 'ecb-tbl-256: I=26'), - ('d9f6ff44646c4725bd4c0103ff5552a7', '430bf9570804185e1ab6365fc6a6860c', - '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', - 'ecb-tbl-256: I=27'), - ('0b1256c2a00b976250cfc5b0c37ed382', '3525ebc02f4886e6a5a3762813e8ce8a', - 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', - 'ecb-tbl-256: I=28'), - ('b056447ffc6dc4523a36cc2e972a3a79', '07fa265c763779cce224c7bad671027b', - 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', - 'ecb-tbl-256: I=29'), - ('5e25ca78f0de55802524d38da3fe4456', 'e8b72b4e8be243438c9fff1f0e205872', - '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', - 'ecb-tbl-256: I=30'), - ('a5bcf4728fa5eaad8567c0dc24675f83', '109d4f999a0e11ace1f05e6b22cbcb50', - '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', - 'ecb-tbl-256: I=31'), - ('814e59f97ed84646b78b2ca022e9ca43', '45a5e8d4c3ed58403ff08d68a0cc4029', - '50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', - 'ecb-tbl-256: I=32'), - ('15478beec58f4775c7a7f5d4395514d7', '196865964db3d417b6bd4d586bcb7634', - '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394969798999b9c9d9e', - 'ecb-tbl-256: I=33'), - ('253548ffca461c67c8cbc78cd59f4756', '60436ad45ac7d30d99195f815d98d2ae', - 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', - 'ecb-tbl-256: I=34'), - ('fd7ad8d73b9b0f8cc41600640f503d65', 'bb07a23f0b61014b197620c185e2cd75', - 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', - 'ecb-tbl-256: I=35'), - ('06199de52c6cbf8af954cd65830bcd56', '5bc0b2850129c854423aff0751fe343b', - 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c0e0f101113141516', - 'ecb-tbl-256: I=36'), - ('f17c4ffe48e44c61bd891e257e725794', '7541a78f96738e6417d2a24bd2beca40', - '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334363738393b3c3d3e', - 'ecb-tbl-256: I=37'), - ('9a5b4a402a3e8a59be6bf5cd8154f029', 'b0a303054412882e464591f1546c5b9e', - '40414243454647484a4b4c4d4f50515254555657595a5b5c5e5f606163646566', - 'ecb-tbl-256: I=38'), - ('79bd40b91a7e07dc939d441782ae6b17', '778c06d8a355eeee214fcea14b4e0eef', - '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384868788898b8c8d8e', - 'ecb-tbl-256: I=39'), - ('d8ceaaf8976e5fbe1012d8c84f323799', '09614206d15cbace63227d06db6beebb', - '90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', - 'ecb-tbl-256: I=40'), - ('3316e2751e2e388b083da23dd6ac3fbe', '41b97fb20e427a9fdbbb358d9262255d', - 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', - 'ecb-tbl-256: I=41'), - ('8b7cfbe37de7dca793521819242c5816', 'c1940f703d845f957652c2d64abd7adf', - 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', - 'ecb-tbl-256: I=42'), - ('f23f033c0eebf8ec55752662fd58ce68', 'd2d44fcdae5332343366db297efcf21b', - '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324262728292b2c2d2e', - 'ecb-tbl-256: I=43'), - ('59eb34f6c8bdbacc5fc6ad73a59a1301', 'ea8196b79dbe167b6aa9896e287eed2b', - '30313233353637383a3b3c3d3f40414244454647494a4b4c4e4f505153545556', - 'ecb-tbl-256: I=44'), - ('dcde8b6bd5cf7cc22d9505e3ce81261a', 'd6b0b0c4ba6c7dbe5ed467a1e3f06c2d', - '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374767778797b7c7d7e', - 'ecb-tbl-256: I=45'), - 
('e33cf7e524fed781e7042ff9f4b35dc7', 'ec51eb295250c22c2fb01816fb72bcae', - '80818283858687888a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', - 'ecb-tbl-256: I=46'), - ('27963c8facdf73062867d164df6d064c', 'aded6630a07ce9c7408a155d3bd0d36f', - 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4c6c7c8c9cbcccdce', - 'ecb-tbl-256: I=47'), - ('77b1ce386b551b995f2f2a1da994eef8', '697c9245b9937f32f5d1c82319f0363a', - 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', - 'ecb-tbl-256: I=48'), - ('f083388b013679efcf0bb9b15d52ae5c', 'aad5ad50c6262aaec30541a1b7b5b19c', - 'f8f9fafbfdfefe00020304050708090a0c0d0e0f11121314161718191b1c1d1e', - 'ecb-tbl-256: I=49'), - ('c5009e0dab55db0abdb636f2600290c8', '7d34b893855341ec625bd6875ac18c0d', - '20212223252627282a2b2c2d2f30313234353637393a3b3c3e3f404143444546', - 'ecb-tbl-256: I=50'), - ('7804881e26cd532d8514d3683f00f1b9', '7ef05105440f83862f5d780e88f02b41', - '48494a4b4d4e4f50525354555758595a5c5d5e5f61626364666768696b6c6d6e', - 'ecb-tbl-256: I=51'), - ('46cddcd73d1eb53e675ca012870a92a3', 'c377c06403382061af2c9c93a8e70df6', - '70717273757677787a7b7c7d7f80818284858687898a8b8c8e8f909193949596', - 'ecb-tbl-256: I=52'), - ('a9fb44062bb07fe130a8e8299eacb1ab', '1dbdb3ffdc052dacc83318853abc6de5', - '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', - 'ecb-tbl-256: I=53'), - ('2b6ff8d7a5cc3a28a22d5a6f221af26b', '69a6eab00432517d0bf483c91c0963c7', - 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', - 'ecb-tbl-256: I=54'), - ('1a9527c29b8add4b0e3e656dbb2af8b4', '0797f41dc217c80446e1d514bd6ab197', - 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', - 'ecb-tbl-256: I=55'), - ('7f99cf2c75244df015eb4b0c1050aeae', '9dfd76575902a637c01343c58e011a03', - '10111213151617181a1b1c1d1f20212224252627292a2b2c2e2f303133343536', - 'ecb-tbl-256: I=56'), - ('e84ff85b0d9454071909c1381646c4ed', 'acf4328ae78f34b9fa9b459747cc2658', - '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354565758595b5c5d5e', - 'ecb-tbl-256: I=57'), - ('89afd40f99521280d5399b12404f6db4', 'b0479aea12bac4fe2384cf98995150c6', - '60616263656667686a6b6c6d6f70717274757677797a7b7c7e7f808183848586', - 'ecb-tbl-256: I=58'), - ('a09ef32dbc5119a35ab7fa38656f0329', '9dd52789efe3ffb99f33b3da5030109a', - '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', - 'ecb-tbl-256: I=59'), - ('61773457f068c376c7829b93e696e716', 'abbb755e4621ef8f1214c19f649fb9fd', - 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', - 'ecb-tbl-256: I=60'), - ('a34f0cae726cce41dd498747d891b967', 'da27fb8174357bce2bed0e7354f380f9', - 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', - 'ecb-tbl-256: I=61'), - ('856f59496c7388ee2d2b1a27b7697847', 'c59a0663f0993838f6e5856593bdc5ef', - '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', - 'ecb-tbl-256: I=62'), - ('cb090c593ef7720bd95908fb93b49df4', 'ed60b264b5213e831607a99c0ce5e57e', - '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', - 'ecb-tbl-256: I=63'), - ('a0ac75cd2f1923d460fc4d457ad95baf', 'e50548746846f3eb77b8c520640884ed', - '50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', - 'ecb-tbl-256: I=64'), - ('2a2b282974777689e8e9eeef525d5c5f', '28282cc7d21d6a2923641e52d188ef0c', - '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394969798999b9c9d9e', - 'ecb-tbl-256: I=65'), - ('909192939390919e0f0e09089788898a', '0dfa5b02abb18e5a815305216d6d4f8e', - 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', - 'ecb-tbl-256: I=66'), - 
('777675748d8e8f907170777649464744', '7359635c0eecefe31d673395fb46fb99', - 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', - 'ecb-tbl-256: I=67'), - ('717073720605040b2d2c2b2a05fafbf9', '73c679f7d5aef2745c9737bb4c47fb36', - 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c0e0f101113141516', - 'ecb-tbl-256: I=68'), - ('64656667fefdfcc31b1a1d1ca5aaaba8', 'b192bd472a4d2eafb786e97458967626', - '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334363738393b3c3d3e', - 'ecb-tbl-256: I=69'), - ('dbdad9d86a696867b5b4b3b2c8d7d6d5', '0ec327f6c8a2b147598ca3fde61dc6a4', - '40414243454647484a4b4c4d4f50515254555657595a5b5c5e5f606163646566', - 'ecb-tbl-256: I=70'), - ('5c5d5e5fe3e0e1fe31303736333c3d3e', 'fc418eb3c41b859b38d4b6f646629729', - '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384868788898b8c8d8e', - 'ecb-tbl-256: I=71'), - ('545556574b48494673727574546b6a69', '30249e5ac282b1c981ea64b609f3a154', - '90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', - 'ecb-tbl-256: I=72'), - ('ecedeeefc6c5c4bb56575051f5fafbf8', '5e6e08646d12150776bb43c2d78a9703', - 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', - 'ecb-tbl-256: I=73'), - ('464744452724252ac9c8cfced2cdcccf', 'faeb3d5de652cd3447dceb343f30394a', - 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', - 'ecb-tbl-256: I=74'), - ('e6e7e4e54142435c878681801c131211', 'a8e88706823f6993ef80d05c1c7b2cf0', - '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324262728292b2c2d2e', - 'ecb-tbl-256: I=75'), - ('72737071cfcccdc2f9f8fffe710e0f0c', '8ced86677e6e00a1a1b15968f2d3cce6', - '30313233353637383a3b3c3d3f40414244454647494a4b4c4e4f505153545556', - 'ecb-tbl-256: I=76'), - ('505152537370714ec3c2c5c4010e0f0c', '9fc7c23858be03bdebb84e90db6786a9', - '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374767778797b7c7d7e', - 'ecb-tbl-256: I=77'), - ('a8a9aaab5c5f5e51aeafa8a93d222320', 'b4fbd65b33f70d8cf7f1111ac4649c36', - '80818283858687888a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', - 'ecb-tbl-256: I=78'), - ('dedfdcddf6f5f4eb10111617fef1f0f3', 'c5c32d5ed03c4b53cc8c1bd0ef0dbbf6', - 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4c6c7c8c9cbcccdce', - 'ecb-tbl-256: I=79'), - ('bdbcbfbe5e5d5c530b0a0d0cfac5c4c7', 'd1a7f03b773e5c212464b63709c6a891', - 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', - 'ecb-tbl-256: I=80'), - ('8a8b8889050606f8f4f5f2f3636c6d6e', '6b7161d8745947ac6950438ea138d028', - 'f8f9fafbfdfefe00020304050708090a0c0d0e0f11121314161718191b1c1d1e', - 'ecb-tbl-256: I=81'), - ('a6a7a4a54d4e4f40b2b3b4b539262724', 'fd47a9f7e366ee7a09bc508b00460661', - '20212223252627282a2b2c2d2f30313234353637393a3b3c3e3f404143444546', - 'ecb-tbl-256: I=82'), - ('9c9d9e9fe9eaebf40e0f08099b949596', '00d40b003dc3a0d9310b659b98c7e416', - '48494a4b4d4e4f50525354555758595a5c5d5e5f61626364666768696b6c6d6e', - 'ecb-tbl-256: I=83'), - ('2d2c2f2e1013121dcccdcacbed121310', 'eea4c79dcc8e2bda691f20ac48be0717', - '70717273757677787a7b7c7d7f80818284858687898a8b8c8e8f909193949596', - 'ecb-tbl-256: I=84'), - ('f4f5f6f7edeeefd0eaebecedf7f8f9fa', 'e78f43b11c204403e5751f89d05a2509', - '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', - 'ecb-tbl-256: I=85'), - ('3d3c3f3e282b2a2573727574150a0b08', 'd0f0e3d1f1244bb979931e38dd1786ef', - 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', - 'ecb-tbl-256: I=86'), - ('b6b7b4b5f8fbfae5b4b5b2b3a0afaead', '042e639dc4e1e4dde7b75b749ea6f765', - 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', - 'ecb-tbl-256: I=87'), - 
('b7b6b5b4989b9a95878681809ba4a5a6', 'bc032fdd0efe29503a980a7d07ab46a8', - '10111213151617181a1b1c1d1f20212224252627292a2b2c2e2f303133343536', - 'ecb-tbl-256: I=88'), - ('a8a9aaabe5e6e798e9e8efee4748494a', '0c93ac949c0da6446effb86183b6c910', - '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354565758595b5c5d5e', - 'ecb-tbl-256: I=89'), - ('ecedeeefd9dadbd4b9b8bfbe657a7b78', 'e0d343e14da75c917b4a5cec4810d7c2', - '60616263656667686a6b6c6d6f70717274757677797a7b7c7e7f808183848586', - 'ecb-tbl-256: I=90'), - ('7f7e7d7c696a6b74cacbcccd929d9c9f', '0eafb821748408279b937b626792e619', - '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', - 'ecb-tbl-256: I=91'), - ('08090a0b0605040bfffef9f8b9c6c7c4', 'fa1ac6e02d23b106a1fef18b274a553f', - 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', - 'ecb-tbl-256: I=92'), - ('08090a0bf1f2f3ccfcfdfafb68676665', '0dadfe019cd12368075507df33c1a1e9', - 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', - 'ecb-tbl-256: I=93'), - ('cacbc8c93a393837050403020d121310', '3a0879b414465d9ffbaf86b33a63a1b9', - '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', - 'ecb-tbl-256: I=94'), - ('e9e8ebea8281809f8f8e8988343b3a39', '62199fadc76d0be1805d3ba0b7d914bf', - '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', - 'ecb-tbl-256: I=95'), - ('515053524645444bd0d1d6d7340b0a09', '1b06d6c5d333e742730130cf78e719b4', - '50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', - 'ecb-tbl-256: I=96'), - ('42434041ecefee1193929594c6c9c8cb', 'f1f848824c32e9dcdcbf21580f069329', - '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394969798999b9c9d9e', - 'ecb-tbl-256: I=97'), - ('efeeedecc2c1c0cf76777071455a5b58', '1a09050cbd684f784d8e965e0782f28a', - 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', - 'ecb-tbl-256: I=98'), - ('5f5e5d5c3f3c3d221d1c1b1a19161714', '79c2969e7ded2ba7d088f3f320692360', - 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', - 'ecb-tbl-256: I=99'), - ('000102034142434c1c1d1a1b8d727371', '091a658a2f7444c16accb669450c7b63', - 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c0e0f101113141516', - 'ecb-tbl-256: I=100'), - ('8e8f8c8db1b2b38c56575051050a0b08', '97c1e3a72cca65fa977d5ed0e8a7bbfc', - '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334363738393b3c3d3e', - 'ecb-tbl-256: I=101'), - ('a7a6a5a4e8ebeae57f7e7978cad5d4d7', '70c430c6db9a17828937305a2df91a2a', - '40414243454647484a4b4c4d4f50515254555657595a5b5c5e5f606163646566', - 'ecb-tbl-256: I=102'), - ('8a8b888994979689454443429f909192', '629553457fbe2479098571c7c903fde8', - '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384868788898b8c8d8e', - 'ecb-tbl-256: I=103'), - ('8c8d8e8fe0e3e2ed45444342f1cecfcc', 'a25b25a61f612669e7d91265c7d476ba', - '90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', - 'ecb-tbl-256: I=104'), - ('fffefdfc4c4f4e31d8d9dedfb6b9b8bb', 'eb7e4e49b8ae0f024570dda293254fed', - 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', - 'ecb-tbl-256: I=105'), - ('fdfcfffecccfcec12f2e29286679787b', '38fe15d61cca84516e924adce5014f67', - 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', - 'ecb-tbl-256: I=106'), - ('67666564bab9b8a77071767719161714', '3ad208492249108c9f3ebeb167ad0583', - '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324262728292b2c2d2e', - 'ecb-tbl-256: I=107'), - ('9a9b98992d2e2f2084858283245b5a59', '299ba9f9bf5ab05c3580fc26edd1ed12', - '30313233353637383a3b3c3d3f40414244454647494a4b4c4e4f505153545556', - 'ecb-tbl-256: 
I=108'), - ('a4a5a6a70b0809365c5d5a5b2c232221', '19dc705b857a60fb07717b2ea5717781', - '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374767778797b7c7d7e', - 'ecb-tbl-256: I=109'), - ('464744455754555af3f2f5f4afb0b1b2', 'ffc8aeb885b5efcad06b6dbebf92e76b', - '80818283858687888a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', - 'ecb-tbl-256: I=110'), - ('323330317675746b7273747549464744', 'f58900c5e0b385253ff2546250a0142b', - 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4c6c7c8c9cbcccdce', - 'ecb-tbl-256: I=111'), - ('a8a9aaab181b1a15808186872b141516', '2ee67b56280bc462429cee6e3370cbc1', - 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', - 'ecb-tbl-256: I=112'), - ('e7e6e5e4202323ddaaabacad343b3a39', '20db650a9c8e9a84ab4d25f7edc8f03f', - 'f8f9fafbfdfefe00020304050708090a0c0d0e0f11121314161718191b1c1d1e', - 'ecb-tbl-256: I=113'), - ('a8a9aaab2221202fedecebea1e010003', '3c36da169525cf818843805f25b78ae5', - '20212223252627282a2b2c2d2f30313234353637393a3b3c3e3f404143444546', - 'ecb-tbl-256: I=114'), - ('f9f8fbfa5f5c5d42424344450e010003', '9a781d960db9e45e37779042fea51922', - '48494a4b4d4e4f50525354555758595a5c5d5e5f61626364666768696b6c6d6e', - 'ecb-tbl-256: I=115'), - ('57565554f5f6f7f89697909120dfdedd', '6560395ec269c672a3c288226efdba77', - '70717273757677787a7b7c7d7f80818284858687898a8b8c8e8f909193949596', - 'ecb-tbl-256: I=116'), - ('f8f9fafbcccfcef1dddcdbda0e010003', '8c772b7a189ac544453d5916ebb27b9a', - '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', - 'ecb-tbl-256: I=117'), - ('d9d8dbda7073727d80818687c2dddcdf', '77ca5468cc48e843d05f78eed9d6578f', - 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', - 'ecb-tbl-256: I=118'), - ('c5c4c7c6080b0a1588898e8f68676665', '72cdcc71dc82c60d4429c9e2d8195baa', - 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', - 'ecb-tbl-256: I=119'), - ('83828180dcdfded186878081f0cfcecd', '8080d68ce60e94b40b5b8b69eeb35afa', - '10111213151617181a1b1c1d1f20212224252627292a2b2c2e2f303133343536', - 'ecb-tbl-256: I=120'), - ('98999a9bdddedfa079787f7e0a050407', '44222d3cde299c04369d58ac0eba1e8e', - '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354565758595b5c5d5e', - 'ecb-tbl-256: I=121'), - ('cecfcccd4f4c4d429f9e9998dfc0c1c2', '9b8721b0a8dfc691c5bc5885dbfcb27a', - '60616263656667686a6b6c6d6f70717274757677797a7b7c7e7f808183848586', - 'ecb-tbl-256: I=122'), - ('404142436665647b29282f2eaba4a5a6', '0dc015ce9a3a3414b5e62ec643384183', - '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', - 'ecb-tbl-256: I=123'), - ('33323130e6e5e4eb23222524dea1a0a3', '705715448a8da412025ce38345c2a148', - 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', - 'ecb-tbl-256: I=124'), - ('cfcecdccf6f5f4cbe6e7e0e199969794', 'c32b5b0b6fbae165266c569f4b6ecf0b', - 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', - 'ecb-tbl-256: I=125'), - ('babbb8b97271707fdcdddadb29363734', '4dca6c75192a01ddca9476af2a521e87', - '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', - 'ecb-tbl-256: I=126'), - ('c9c8cbca4447465926272021545b5a59', '058691e627ecbc36ac07b6db423bd698', - '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', - 'ecb-tbl-256: I=127'), - ('050407067477767956575051221d1c1f', '7444527095838fe080fc2bcdd30847eb', - '50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', - 'ecb-tbl-256: I=128'), - - # FIPS PUB 800-38A test vectors, 2001 edition. Annex F. 
- - ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ - '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', - '3ad77bb40d7a3660a89ecaf32466ef97'+'f5d3d58503b9699de785895a96fdbaaf'+ - '43b1cd7f598ece23881b00e3ed030688'+'7b0c785e27e8ad3f8223207104725dd4', - '2b7e151628aed2a6abf7158809cf4f3c', - 'NIST 800-38A, F.1.1, ECB and AES-128'), - - ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ - '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', - 'bd334f1d6e45f25ff712a214571fa5cc'+'974104846d0ad3ad7734ecb3ecee4eef'+ - 'ef7afd2270e2e60adce0ba2face6444e'+'9a4b41ba738d6c72fb16691603c18e0e', - '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b', - 'NIST 800-38A, F.1.3, ECB and AES-192'), - - ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ - '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', - 'f3eed1bdb5d2a03c064b5a7e3db181f8'+'591ccb10d410ed26dc5ba74a31362870'+ - 'b6ed21b99ca6f4f9f153e7b1beafed1d'+'23304b7a39f9f3ff067d8d8f9e24ecc7', - '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4', - 'NIST 800-38A, F.1.3, ECB and AES-256'), - -] - -test_data_8_lanes = [] -for td in test_data: - test_data_8_lanes.append((td[0] * 8, td[1] * 8, td[2], td[3])) -test_data += test_data_8_lanes - -class TestMultipleBlocks(unittest.TestCase): - - def __init__(self, use_aesni): - unittest.TestCase.__init__(self) - self.use_aesni = use_aesni - - def runTest(self): - # Encrypt data which is 8*2+4 bytes long, so as to trigger (for the - # AESNI variant) both the path that parallelizes 8 lanes and the one - # that processes data serially - - tvs = [ - (b'a' * 16, 'c0b27011eb15bf144d2fc9fae80ea16d4c231cb230416c5fac02e6835ad9d7d0'), - (b'a' * 24, 'df8435ce361a78c535b41dcb57da952abbf9ee5954dc6fbcd75fd00fa626915d'), - (b'a' * 32, '211402de6c80db1f92ba255881178e1f70783b8cfd3b37808205e48b80486cd8') - ] - - for key, expected in tvs: - - cipher = AES.new(key, AES.MODE_ECB, use_aesni=self.use_aesni) - h = SHA256.new() - - pt = b"".join([ tobytes('{0:016x}'.format(x)) for x in range(20) ]) - ct = cipher.encrypt(pt) - self.assertEqual(SHA256.new(ct).hexdigest(), expected) - - -class TestIncompleteBlocks(unittest.TestCase): - - def __init__(self, use_aesni): - unittest.TestCase.__init__(self) - self.use_aesni = use_aesni - - def runTest(self): - # Encrypt data with length not multiple of 16 bytes - - cipher = AES.new(b'4'*16, AES.MODE_ECB, use_aesni=self.use_aesni) - - for msg_len in range(1, 16): - self.assertRaises(ValueError, cipher.encrypt, b'1' * msg_len) - self.assertRaises(ValueError, cipher.encrypt, b'1' * (msg_len+16)) - self.assertRaises(ValueError, cipher.decrypt, b'1' * msg_len) - self.assertRaises(ValueError, cipher.decrypt, b'1' * (msg_len+16)) - - self.assertEqual(cipher.encrypt(b''), b'') - self.assertEqual(cipher.decrypt(b''), b'') - - -class TestOutput(unittest.TestCase): - - def __init__(self, use_aesni): - unittest.TestCase.__init__(self) - self.use_aesni = use_aesni - - def runTest(self): - # Encrypt/Decrypt data and test output parameter - - cipher = AES.new(b'4'*16, AES.MODE_ECB, use_aesni=self.use_aesni) - - pt = b'5' * 16 - ct = cipher.encrypt(pt) - - output = bytearray(16) - res = cipher.encrypt(pt, output=output) - self.assertEqual(ct, output) - self.assertEqual(res, None) - - res = cipher.decrypt(ct, output=output) - self.assertEqual(pt, output) - self.assertEqual(res, None) - - output = memoryview(bytearray(16)) - cipher.encrypt(pt, output=output) - 
self.assertEqual(ct, output) - - cipher.decrypt(ct, output=output) - self.assertEqual(pt, output) - - self.assertRaises(TypeError, cipher.encrypt, pt, output=b'0'*16) - self.assertRaises(TypeError, cipher.decrypt, ct, output=b'0'*16) - - shorter_output = bytearray(15) - self.assertRaises(ValueError, cipher.encrypt, pt, output=shorter_output) - self.assertRaises(ValueError, cipher.decrypt, ct, output=shorter_output) - - -def get_tests(config={}): - from Crypto.Util import _cpu_features - from .common import make_block_tests - - tests = make_block_tests(AES, "AES", test_data, {'use_aesni': False}) - tests += [ TestMultipleBlocks(False) ] - tests += [ TestIncompleteBlocks(False) ] - if _cpu_features.have_aes_ni(): - # Run tests with AES-NI instructions if they are available. - tests += make_block_tests(AES, "AESNI", test_data, {'use_aesni': True}) - tests += [ TestMultipleBlocks(True) ] - tests += [ TestIncompleteBlocks(True) ] - tests += [ TestOutput(True) ] - else: - print("Skipping AESNI tests") - return tests - -if __name__ == '__main__': - import unittest - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') - -# vim:set ts=4 sw=4 sts=4 expandtab: diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/vegalite/v5/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/vegalite/v5/__init__.py deleted file mode 100644 index 8c75d5cc2cea1b0015aaad527dcab4e29b288da2..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/vegalite/v5/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# ruff: noqa -from .schema import * -from .api import * - -from ...expr import datum, expr # type: ignore[no-redef] - -from .display import VegaLite, renderers -from .compiler import vegalite_compilers - -from .data import ( - MaxRowsError, - pipe, - curry, - limit_rows, - sample, - to_json, - to_csv, - to_values, - default_data_transformer, - data_transformers, -) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssecalgs/rsa.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssecalgs/rsa.py deleted file mode 100644 index e95dcf1ddc45ad7c2731b258f5edd3abd34e5248..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssecalgs/rsa.py +++ /dev/null @@ -1,119 +0,0 @@ -import math -import struct - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.primitives.asymmetric import padding, rsa - -from dns.dnssecalgs.cryptography import CryptographyPrivateKey, CryptographyPublicKey -from dns.dnssectypes import Algorithm -from dns.rdtypes.ANY.DNSKEY import DNSKEY - - -class PublicRSA(CryptographyPublicKey): - key: rsa.RSAPublicKey - key_cls = rsa.RSAPublicKey - algorithm: Algorithm - chosen_hash: hashes.HashAlgorithm - - def verify(self, signature: bytes, data: bytes) -> None: - self.key.verify(signature, data, padding.PKCS1v15(), self.chosen_hash) - - def encode_key_bytes(self) -> bytes: - """Encode a public key per RFC 3110, section 2.""" - pn = self.key.public_numbers() - _exp_len = math.ceil(int.bit_length(pn.e) / 8) - exp = int.to_bytes(pn.e, length=_exp_len, byteorder="big") - if _exp_len > 255: - exp_header = b"\0" + struct.pack("!H", _exp_len) - else: - exp_header = struct.pack("!B", _exp_len) - if pn.n.bit_length() < 512 or pn.n.bit_length() > 
4096: - raise ValueError("unsupported RSA key length") - return exp_header + exp + pn.n.to_bytes((pn.n.bit_length() + 7) // 8, "big") - - @classmethod - def from_dnskey(cls, key: DNSKEY) -> "PublicRSA": - cls._ensure_algorithm_key_combination(key) - keyptr = key.key - (bytes_,) = struct.unpack("!B", keyptr[0:1]) - keyptr = keyptr[1:] - if bytes_ == 0: - (bytes_,) = struct.unpack("!H", keyptr[0:2]) - keyptr = keyptr[2:] - rsa_e = keyptr[0:bytes_] - rsa_n = keyptr[bytes_:] - return cls( - key=rsa.RSAPublicNumbers( - int.from_bytes(rsa_e, "big"), int.from_bytes(rsa_n, "big") - ).public_key(default_backend()) - ) - - -class PrivateRSA(CryptographyPrivateKey): - key: rsa.RSAPrivateKey - key_cls = rsa.RSAPrivateKey - public_cls = PublicRSA - default_public_exponent = 65537 - - def sign(self, data: bytes, verify: bool = False) -> bytes: - """Sign using a private key per RFC 3110, section 3.""" - signature = self.key.sign(data, padding.PKCS1v15(), self.public_cls.chosen_hash) - if verify: - self.public_key().verify(signature, data) - return signature - - @classmethod - def generate(cls, key_size: int) -> "PrivateRSA": - return cls( - key=rsa.generate_private_key( - public_exponent=cls.default_public_exponent, - key_size=key_size, - backend=default_backend(), - ) - ) - - -class PublicRSAMD5(PublicRSA): - algorithm = Algorithm.RSAMD5 - chosen_hash = hashes.MD5() - - -class PrivateRSAMD5(PrivateRSA): - public_cls = PublicRSAMD5 - - -class PublicRSASHA1(PublicRSA): - algorithm = Algorithm.RSASHA1 - chosen_hash = hashes.SHA1() - - -class PrivateRSASHA1(PrivateRSA): - public_cls = PublicRSASHA1 - - -class PublicRSASHA1NSEC3SHA1(PublicRSA): - algorithm = Algorithm.RSASHA1NSEC3SHA1 - chosen_hash = hashes.SHA1() - - -class PrivateRSASHA1NSEC3SHA1(PrivateRSA): - public_cls = PublicRSASHA1NSEC3SHA1 - - -class PublicRSASHA256(PublicRSA): - algorithm = Algorithm.RSASHA256 - chosen_hash = hashes.SHA256() - - -class PrivateRSASHA256(PrivateRSA): - public_cls = PublicRSASHA256 - - -class PublicRSASHA512(PublicRSA): - algorithm = Algorithm.RSASHA512 - chosen_hash = hashes.SHA512() - - -class PrivateRSASHA512(PrivateRSA): - public_cls = PublicRSASHA512 diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/merge/util.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/merge/util.py deleted file mode 100644 index 42fe39d5f701e683f52ca7c4022b1bb85749fb6b..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/merge/util.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. 
-# -# Google Author(s): Behdad Esfahbod, Roozbeh Pournader - -from fontTools.misc.timeTools import timestampNow -from fontTools.ttLib.tables.DefaultTable import DefaultTable -from functools import reduce -import operator -import logging - - -log = logging.getLogger("fontTools.merge") - - -# General utility functions for merging values from different fonts - - -def equal(lst): - lst = list(lst) - t = iter(lst) - first = next(t) - assert all(item == first for item in t), "Expected all items to be equal: %s" % lst - return first - - -def first(lst): - return next(iter(lst)) - - -def recalculate(lst): - return NotImplemented - - -def current_time(lst): - return timestampNow() - - -def bitwise_and(lst): - return reduce(operator.and_, lst) - - -def bitwise_or(lst): - return reduce(operator.or_, lst) - - -def avg_int(lst): - lst = list(lst) - return sum(lst) // len(lst) - - -def onlyExisting(func): - """Returns a filter func that when called with a list, - only calls func on the non-NotImplemented items of the list, - and only so if there's at least one item remaining. - Otherwise returns NotImplemented.""" - - def wrapper(lst): - items = [item for item in lst if item is not NotImplemented] - return func(items) if items else NotImplemented - - return wrapper - - -def sumLists(lst): - l = [] - for item in lst: - l.extend(item) - return l - - -def sumDicts(lst): - d = {} - for item in lst: - d.update(item) - return d - - -def mergeBits(bitmap): - def wrapper(lst): - lst = list(lst) - returnValue = 0 - for bitNumber in range(bitmap["size"]): - try: - mergeLogic = bitmap[bitNumber] - except KeyError: - try: - mergeLogic = bitmap["*"] - except KeyError: - raise Exception("Don't know how to merge bit %s" % bitNumber) - shiftedBit = 1 << bitNumber - mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst) - returnValue |= mergedValue << bitNumber - return returnValue - - return wrapper - - -class AttendanceRecordingIdentityDict(object): - """A dictionary-like object that records indices of items actually accessed - from a list.""" - - def __init__(self, lst): - self.l = lst - self.d = {id(v): i for i, v in enumerate(lst)} - self.s = set() - - def __getitem__(self, v): - self.s.add(self.d[id(v)]) - return v - - -class GregariousIdentityDict(object): - """A dictionary-like object that welcomes guests without reservations and - adds them to the end of the guest list.""" - - def __init__(self, lst): - self.l = lst - self.s = set(id(v) for v in lst) - - def __getitem__(self, v): - if id(v) not in self.s: - self.s.add(id(v)) - self.l.append(v) - return v - - -class NonhashableDict(object): - """A dictionary-like object mapping objects to values.""" - - def __init__(self, keys, values=None): - if values is None: - self.d = {id(v): i for i, v in enumerate(keys)} - else: - self.d = {id(k): v for k, v in zip(keys, values)} - - def __getitem__(self, k): - return self.d[id(k)] - - def __setitem__(self, k, v): - self.d[id(k)] = v - - def __delitem__(self, k): - del self.d[id(k)] diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/misc/fixedTools.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/misc/fixedTools.py deleted file mode 100644 index 330042871c521231f2a396add543dd425783722b..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/misc/fixedTools.py +++ /dev/null @@ -1,253 +0,0 @@ -""" -The `OpenType specification `_ -defines two fixed-point data types: - 
-``Fixed`` - A 32-bit signed fixed-point number with a 16 bit twos-complement - magnitude component and 16 fractional bits. -``F2DOT14`` - A 16-bit signed fixed-point number with a 2 bit twos-complement - magnitude component and 14 fractional bits. - -To support reading and writing data with these data types, this module provides -functions for converting between fixed-point, float and string representations. - -.. data:: MAX_F2DOT14 - - The maximum value that can still fit in an F2Dot14. (1.99993896484375) -""" - -from .roundTools import otRound, nearestMultipleShortestRepr -import logging - -log = logging.getLogger(__name__) - -__all__ = [ - "MAX_F2DOT14", - "fixedToFloat", - "floatToFixed", - "floatToFixedToFloat", - "floatToFixedToStr", - "fixedToStr", - "strToFixed", - "strToFixedToFloat", - "ensureVersionIsLong", - "versionToFixed", -] - - -MAX_F2DOT14 = 0x7FFF / (1 << 14) - - -def fixedToFloat(value, precisionBits): - """Converts a fixed-point number to a float given the number of - precision bits. - - Args: - value (int): Number in fixed-point format. - precisionBits (int): Number of precision bits. - - Returns: - Floating point value. - - Examples:: - - >>> import math - >>> f = fixedToFloat(-10139, precisionBits=14) - >>> math.isclose(f, -0.61883544921875) - True - """ - return value / (1 << precisionBits) - - -def floatToFixed(value, precisionBits): - """Converts a float to a fixed-point number given the number of - precision bits. - - Args: - value (float): Floating point value. - precisionBits (int): Number of precision bits. - - Returns: - int: Fixed-point representation. - - Examples:: - - >>> floatToFixed(-0.61883544921875, precisionBits=14) - -10139 - >>> floatToFixed(-0.61884, precisionBits=14) - -10139 - """ - return otRound(value * (1 << precisionBits)) - - -def floatToFixedToFloat(value, precisionBits): - """Converts a float to a fixed-point number and back again. - - By converting the float to fixed, rounding it, and converting it back - to float again, this returns a floating point values which is exactly - representable in fixed-point format. - - Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``. - - Args: - value (float): The input floating point value. - precisionBits (int): Number of precision bits. - - Returns: - float: The transformed and rounded value. - - Examples:: - >>> import math - >>> f1 = -0.61884 - >>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14) - >>> f1 != f2 - True - >>> math.isclose(f2, -0.61883544921875) - True - """ - scale = 1 << precisionBits - return otRound(value * scale) / scale - - -def fixedToStr(value, precisionBits): - """Converts a fixed-point number to a string representing a decimal float. - - This chooses the float that has the shortest decimal representation (the least - number of fractional decimal digits). - - For example, to convert a fixed-point number in a 2.14 format, use - ``precisionBits=14``:: - - >>> fixedToStr(-10139, precisionBits=14) - '-0.61884' - - This is pretty slow compared to the simple division used in ``fixedToFloat``. - Use sporadically when you need to serialize or print the fixed-point number in - a human-readable form. - It uses nearestMultipleShortestRepr under the hood. - - Args: - value (int): The fixed-point value to convert. - precisionBits (int): Number of precision bits, *up to a maximum of 16*. - - Returns: - str: A string representation of the value. 
- """ - scale = 1 << precisionBits - return nearestMultipleShortestRepr(value / scale, factor=1.0 / scale) - - -def strToFixed(string, precisionBits): - """Converts a string representing a decimal float to a fixed-point number. - - Args: - string (str): A string representing a decimal float. - precisionBits (int): Number of precision bits, *up to a maximum of 16*. - - Returns: - int: Fixed-point representation. - - Examples:: - - >>> ## to convert a float string to a 2.14 fixed-point number: - >>> strToFixed('-0.61884', precisionBits=14) - -10139 - """ - value = float(string) - return otRound(value * (1 << precisionBits)) - - -def strToFixedToFloat(string, precisionBits): - """Convert a string to a decimal float with fixed-point rounding. - - This first converts string to a float, then turns it into a fixed-point - number with ``precisionBits`` fractional binary digits, then back to a - float again. - - This is simply a shorthand for fixedToFloat(floatToFixed(float(s))). - - Args: - string (str): A string representing a decimal float. - precisionBits (int): Number of precision bits. - - Returns: - float: The transformed and rounded value. - - Examples:: - - >>> import math - >>> s = '-0.61884' - >>> bits = 14 - >>> f = strToFixedToFloat(s, precisionBits=bits) - >>> math.isclose(f, -0.61883544921875) - True - >>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits) - True - """ - value = float(string) - scale = 1 << precisionBits - return otRound(value * scale) / scale - - -def floatToFixedToStr(value, precisionBits): - """Convert float to string with fixed-point rounding. - - This uses the shortest decimal representation (ie. the least - number of fractional decimal digits) to represent the equivalent - fixed-point number with ``precisionBits`` fractional binary digits. - It uses nearestMultipleShortestRepr under the hood. - - >>> floatToFixedToStr(-0.61883544921875, precisionBits=14) - '-0.61884' - - Args: - value (float): The float value to convert. - precisionBits (int): Number of precision bits, *up to a maximum of 16*. - - Returns: - str: A string representation of the value. - - """ - scale = 1 << precisionBits - return nearestMultipleShortestRepr(value, factor=1.0 / scale) - - -def ensureVersionIsLong(value): - """Ensure a table version is an unsigned long. - - OpenType table version numbers are expressed as a single unsigned long - comprising of an unsigned short major version and unsigned short minor - version. This function detects if the value to be used as a version number - looks too small (i.e. is less than ``0x10000``), and converts it to - fixed-point using :func:`floatToFixed` if so. - - Args: - value (Number): a candidate table version number. - - Returns: - int: A table version number, possibly corrected to fixed-point. - """ - if value < 0x10000: - newValue = floatToFixed(value, 16) - log.warning( - "Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x", - value, - newValue, - ) - value = newValue - return value - - -def versionToFixed(value): - """Ensure a table version number is fixed-point. - - Args: - value (str): a candidate table version number. - - Returns: - int: A table version number, possibly corrected to fixed-point. 
- """ - value = int(value, 0) if value.startswith("0") else float(value) - value = ensureVersionIsLong(value) - return value diff --git a/spaces/johnhelf/roop/roop/face_analyser.py b/spaces/johnhelf/roop/roop/face_analyser.py deleted file mode 100644 index 9c0afe458763edb22dc2332f527dfdba48575b1d..0000000000000000000000000000000000000000 --- a/spaces/johnhelf/roop/roop/face_analyser.py +++ /dev/null @@ -1,34 +0,0 @@ -import threading -from typing import Any -import insightface - -import roop.globals -from roop.typing import Frame - -FACE_ANALYSER = None -THREAD_LOCK = threading.Lock() - - -def get_face_analyser() -> Any: - global FACE_ANALYSER - - with THREAD_LOCK: - if FACE_ANALYSER is None: - FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers) - FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640)) - return FACE_ANALYSER - - -def get_one_face(frame: Frame) -> Any: - face = get_face_analyser().get(frame) - try: - return min(face, key=lambda x: x.bbox[0]) - except ValueError: - return None - - -def get_many_faces(frame: Frame) -> Any: - try: - return get_face_analyser().get(frame) - except IndexError: - return None diff --git a/spaces/jordonpeter01/MusicGen2/tests/modules/test_conv.py b/spaces/jordonpeter01/MusicGen2/tests/modules/test_conv.py deleted file mode 100644 index 28fbc4f1a0ebaf41b56947b767958ae696e75eec..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen2/tests/modules/test_conv.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product -import math -import random - -import pytest -import torch -from torch import nn - -from audiocraft.modules import ( - NormConv1d, - NormConvTranspose1d, - StreamableConv1d, - StreamableConvTranspose1d, - pad1d, - unpad1d, -) - - -def test_get_extra_padding_for_conv1d(): - # TODO: Implement me! - pass - - -def test_pad1d_zeros(): - x = torch.randn(1, 1, 20) - - xp1 = pad1d(x, (0, 5), mode='constant', value=0.) - assert xp1.shape[-1] == 25 - xp2 = pad1d(x, (5, 5), mode='constant', value=0.) - assert xp2.shape[-1] == 30 - xp3 = pad1d(x, (0, 0), mode='constant', value=0.) - assert xp3.shape[-1] == 20 - xp4 = pad1d(x, (10, 30), mode='constant', value=0.) - assert xp4.shape[-1] == 60 - - with pytest.raises(AssertionError): - pad1d(x, (-1, 0), mode='constant', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (0, -1), mode='constant', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (-1, -1), mode='constant', value=0.) - - -def test_pad1d_reflect(): - x = torch.randn(1, 1, 20) - - xp1 = pad1d(x, (0, 5), mode='reflect', value=0.) - assert xp1.shape[-1] == 25 - xp2 = pad1d(x, (5, 5), mode='reflect', value=0.) - assert xp2.shape[-1] == 30 - xp3 = pad1d(x, (0, 0), mode='reflect', value=0.) - assert xp3.shape[-1] == 20 - xp4 = pad1d(x, (10, 30), mode='reflect', value=0.) - assert xp4.shape[-1] == 60 - - with pytest.raises(AssertionError): - pad1d(x, (-1, 0), mode='reflect', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (0, -1), mode='reflect', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (-1, -1), mode='reflect', value=0.) 
- - -def test_unpad1d(): - x = torch.randn(1, 1, 20) - - u1 = unpad1d(x, (5, 5)) - assert u1.shape[-1] == 10 - u2 = unpad1d(x, (0, 5)) - assert u2.shape[-1] == 15 - u3 = unpad1d(x, (5, 0)) - assert u3.shape[-1] == 15 - u4 = unpad1d(x, (0, 0)) - assert u4.shape[-1] == x.shape[-1] - - with pytest.raises(AssertionError): - unpad1d(x, (-1, 0)) - - with pytest.raises(AssertionError): - unpad1d(x, (0, -1)) - - with pytest.raises(AssertionError): - unpad1d(x, (-1, -1)) - - -class TestNormConv1d: - - def test_norm_conv1d_modules(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - - C_out, kernel_size, stride = 1, 4, 1 - expected_out_length = int((T - kernel_size) / stride + 1) - wn_conv = NormConv1d(C, 1, kernel_size=4, norm='weight_norm') - gn_conv = NormConv1d(C, 1, kernel_size=4, norm='time_group_norm') - nn_conv = NormConv1d(C, 1, kernel_size=4, norm='none') - - assert isinstance(wn_conv.norm, nn.Identity) - assert isinstance(wn_conv.conv, nn.Conv1d) - - assert isinstance(gn_conv.norm, nn.GroupNorm) - assert isinstance(gn_conv.conv, nn.Conv1d) - - assert isinstance(nn_conv.norm, nn.Identity) - assert isinstance(nn_conv.conv, nn.Conv1d) - - for conv_layer in [wn_conv, gn_conv, nn_conv]: - out = conv_layer(t0) - assert isinstance(out, torch.Tensor) - assert list(out.shape) == [N, C_out, expected_out_length] - - -class TestNormConvTranspose1d: - - def test_normalizations(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - - C_out, kernel_size, stride = 1, 4, 1 - expected_out_length = (T - 1) * stride + (kernel_size - 1) + 1 - - wn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='weight_norm') - gn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='time_group_norm') - nn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='none') - - assert isinstance(wn_convtr.norm, nn.Identity) - assert isinstance(wn_convtr.convtr, nn.ConvTranspose1d) - - assert isinstance(gn_convtr.norm, nn.GroupNorm) - assert isinstance(gn_convtr.convtr, nn.ConvTranspose1d) - - assert isinstance(nn_convtr.norm, nn.Identity) - assert isinstance(nn_convtr.convtr, nn.ConvTranspose1d) - - for convtr_layer in [wn_convtr, gn_convtr, nn_convtr]: - out = convtr_layer(t0) - assert isinstance(out, torch.Tensor) - assert list(out.shape) == [N, C_out, expected_out_length] - - -class TestStreamableConv1d: - - def get_streamable_conv1d_output_length(self, length, kernel_size, stride, dilation): - # StreamableConv1d internally pads to make sure that the last window is full - padding_total = (kernel_size - 1) * dilation - (stride - 1) - n_frames = (length - kernel_size + padding_total) / stride + 1 - ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total) - return ideal_length // stride - - def test_streamable_conv1d(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - C_out = 1 - - # conv params are [(kernel_size, stride, dilation)] - conv_params = [(4, 1, 1), (4, 2, 1), (3, 1, 3), (10, 5, 1), (3, 2, 3)] - for causal, (kernel_size, stride, dilation) in product([False, True], conv_params): - expected_out_length = self.get_streamable_conv1d_output_length(T, kernel_size, stride, dilation) - sconv = StreamableConv1d(C, C_out, kernel_size=kernel_size, stride=stride, dilation=dilation, causal=causal) - out = sconv(t0) - assert isinstance(out, torch.Tensor) - print(list(out.shape), [N, C_out, expected_out_length]) - assert 
list(out.shape) == [N, C_out, expected_out_length] - - -class TestStreamableConvTranspose1d: - - def get_streamable_convtr1d_output_length(self, length, kernel_size, stride): - padding_total = (kernel_size - stride) - return (length - 1) * stride - padding_total + (kernel_size - 1) + 1 - - def test_streamable_convtr1d(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - - C_out = 1 - - with pytest.raises(AssertionError): - StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=False, trim_right_ratio=0.5) - StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=-1.) - StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=2) - - # causal params are [(causal, trim_right)] - causal_params = [(False, 1.0), (True, 1.0), (True, 0.5), (True, 0.0)] - # conv params are [(kernel_size, stride)] - conv_params = [(4, 1), (4, 2), (3, 1), (10, 5)] - for ((causal, trim_right_ratio), (kernel_size, stride)) in product(causal_params, conv_params): - expected_out_length = self.get_streamable_convtr1d_output_length(T, kernel_size, stride) - sconvtr = StreamableConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, - causal=causal, trim_right_ratio=trim_right_ratio) - out = sconvtr(t0) - assert isinstance(out, torch.Tensor) - assert list(out.shape) == [N, C_out, expected_out_length] diff --git a/spaces/jpfearnworks/ai_agents/modules/settings/user_settings.py b/spaces/jpfearnworks/ai_agents/modules/settings/user_settings.py deleted file mode 100644 index 3f5110411e8a9826f56d9ae7f5535482de51d96c..0000000000000000000000000000000000000000 --- a/spaces/jpfearnworks/ai_agents/modules/settings/user_settings.py +++ /dev/null @@ -1,22 +0,0 @@ -import os - -class UserSettings: - __instance = None - - def __init__(self): - if UserSettings.__instance is not None: - raise Exception("UserSettings is a singleton class. Use UserSettings.get_instance() to get the instance.") - self.api_key = None - - @staticmethod - def get_instance(): - if UserSettings.__instance is None: - UserSettings.__instance = UserSettings() - return UserSettings.__instance - - def set_api_key(self, api_key): - self.api_key = api_key - os.environ["OPENAI_API_KEY"] = api_key - - def get_api_key(self): - return self.api_key diff --git a/spaces/jtpotato/firetrace/firetrace/theme.py b/spaces/jtpotato/firetrace/firetrace/theme.py deleted file mode 100644 index dafd42a588f08bab735d6b94e9bed9e04edd3ec3..0000000000000000000000000000000000000000 --- a/spaces/jtpotato/firetrace/firetrace/theme.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -This file contains the theme for the Gradio interface. -""" - -import gradio as gr - -theme = gr.themes.Default( - neutral_hue="zinc", - font=[gr.themes.GoogleFont('Poppins'), 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=[gr.themes.GoogleFont('JetBrains Mono'), 'ui-monospace', 'Consolas', 'monospace'], -) \ No newline at end of file diff --git a/spaces/jyseo/3DFuse/ldm/modules/midas/midas/midas_net_custom.py b/spaces/jyseo/3DFuse/ldm/modules/midas/midas/midas_net_custom.py deleted file mode 100644 index 50e4acb5e53d5fabefe3dde16ab49c33c2b7797c..0000000000000000000000000000000000000000 --- a/spaces/jyseo/3DFuse/ldm/modules/midas/midas/midas_net_custom.py +++ /dev/null @@ -1,128 +0,0 @@ -"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. 
-This file contains code that is adapted from -https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py -""" -import torch -import torch.nn as nn - -from .base_model import BaseModel -from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder - - -class MidasNet_small(BaseModel): - """Network for monocular depth estimation. - """ - - def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True, - blocks={'expand': True}): - """Init. - - Args: - path (str, optional): Path to saved model. Defaults to None. - features (int, optional): Number of features. Defaults to 256. - backbone (str, optional): Backbone network for encoder. Defaults to resnet50 - """ - print("Loading weights: ", path) - - super(MidasNet_small, self).__init__() - - use_pretrained = False if path else True - - self.channels_last = channels_last - self.blocks = blocks - self.backbone = backbone - - self.groups = 1 - - features1=features - features2=features - features3=features - features4=features - self.expand = False - if "expand" in self.blocks and self.blocks['expand'] == True: - self.expand = True - features1=features - features2=features*2 - features3=features*4 - features4=features*8 - - self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable) - - self.scratch.activation = nn.ReLU(False) - - self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners) - - - self.scratch.output_conv = nn.Sequential( - nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups), - Interpolate(scale_factor=2, mode="bilinear"), - nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1), - self.scratch.activation, - nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - nn.ReLU(True) if non_negative else nn.Identity(), - nn.Identity(), - ) - - if path: - self.load(path) - - - def forward(self, x): - """Forward pass. 
- - Args: - x (tensor): input data (image) - - Returns: - tensor: depth - """ - if self.channels_last==True: - print("self.channels_last = ", self.channels_last) - x.contiguous(memory_format=torch.channels_last) - - - layer_1 = self.pretrained.layer1(x) - layer_2 = self.pretrained.layer2(layer_1) - layer_3 = self.pretrained.layer3(layer_2) - layer_4 = self.pretrained.layer4(layer_3) - - layer_1_rn = self.scratch.layer1_rn(layer_1) - layer_2_rn = self.scratch.layer2_rn(layer_2) - layer_3_rn = self.scratch.layer3_rn(layer_3) - layer_4_rn = self.scratch.layer4_rn(layer_4) - - - path_4 = self.scratch.refinenet4(layer_4_rn) - path_3 = self.scratch.refinenet3(path_4, layer_3_rn) - path_2 = self.scratch.refinenet2(path_3, layer_2_rn) - path_1 = self.scratch.refinenet1(path_2, layer_1_rn) - - out = self.scratch.output_conv(path_1) - - return torch.squeeze(out, dim=1) - - - -def fuse_model(m): - prev_previous_type = nn.Identity() - prev_previous_name = '' - previous_type = nn.Identity() - previous_name = '' - for name, module in m.named_modules(): - if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU: - # print("FUSED ", prev_previous_name, previous_name, name) - torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True) - elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d: - # print("FUSED ", prev_previous_name, previous_name) - torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True) - # elif previous_type == nn.Conv2d and type(module) == nn.ReLU: - # print("FUSED ", previous_name, name) - # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True) - - prev_previous_type = previous_type - prev_previous_name = previous_name - previous_type = type(module) - previous_name = name \ No newline at end of file diff --git a/spaces/kangvcar/RealChar/realtime_ai_character/logger.py b/spaces/kangvcar/RealChar/realtime_ai_character/logger.py deleted file mode 100644 index 88b05c5e233eb337fa21f9d9590efbfb112a8ed4..0000000000000000000000000000000000000000 --- a/spaces/kangvcar/RealChar/realtime_ai_character/logger.py +++ /dev/null @@ -1,18 +0,0 @@ -import logging - -formatter = '%(asctime)s - %(funcName)s - %(filename)s - %(levelname)s - %(message)s' - - -def get_logger(logger_name): - logger = logging.getLogger(logger_name) - logger.setLevel(logging.DEBUG) - - # create console handler and set level to debug - console_handler = logging.StreamHandler() - console_handler.setLevel(logging.DEBUG) - ch_format = logging.Formatter(formatter) - console_handler.setFormatter(ch_format) - - logger.addHandler(console_handler) - - return logger diff --git a/spaces/kdrkdrkdr/HinaTTS/utils.py b/spaces/kdrkdrkdr/HinaTTS/utils.py deleted file mode 100644 index 4cb5b43d0ca2bae496e7871b2094f2ffb26ab642..0000000000000000000000000000000000000000 --- a/spaces/kdrkdrkdr/HinaTTS/utils.py +++ /dev/null @@ -1,226 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.ERROR) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - 
optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = 
json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/kdrkdrkdr/LisaTTS/monotonic_align/__init__.py b/spaces/kdrkdrkdr/LisaTTS/monotonic_align/__init__.py deleted file mode 100644 index 40b6f64aa116c74cac2f6a33444c9eeea2fdb38c..0000000000000000000000000000000000000000 --- a/spaces/kdrkdrkdr/LisaTTS/monotonic_align/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - - -def maximum_path(neg_cent, mask): - """ numba optimized version. 
- neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) - diff --git a/spaces/keneonyeachonam/AutoML_UsingStreamlit_Plotly_020923/README.md b/spaces/keneonyeachonam/AutoML_UsingStreamlit_Plotly_020923/README.md deleted file mode 100644 index 7c2ef9b9423a428a58620b3ddc39ae01a20f1490..0000000000000000000000000000000000000000 --- a/spaces/keneonyeachonam/AutoML_UsingStreamlit_Plotly_020923/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AutoML UsingStreamlit Plotly 020923 -emoji: 🐨 -colorFrom: indigo -colorTo: yellow -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/keras-io/conv_autoencoder/app.py b/spaces/keras-io/conv_autoencoder/app.py deleted file mode 100644 index f7969453a013249a534186d173f85b0ba70573aa..0000000000000000000000000000000000000000 --- a/spaces/keras-io/conv_autoencoder/app.py +++ /dev/null @@ -1,34 +0,0 @@ - -import numpy as np -import tensorflow as tf -import gradio as gr -from huggingface_hub import from_pretrained_keras - -model = from_pretrained_keras("keras-io/conv_autoencoder") - -examples = [ - ['./example_0.jpeg'], - ['./example_1.jpeg'], - ['./example_2.jpeg'], - ['./example_3.jpeg'], - ['./example_4.jpeg'] -] - -def infer(original_image): - image = tf.keras.utils.img_to_array(original_image) - image = image.astype("float32") / 255.0 - image = np.reshape(image, (1, 28, 28, 1)) - output = model.predict(image) - output = np.reshape(output, (28, 28, 1)) - output_image = tf.keras.preprocessing.image.array_to_img(output) - return output_image - -iface = gr.Interface( - fn = infer, - title = "Image Denoising using Convolutional AutoEncoders", - description = "Keras Implementation of a deep convolutional autoencoder for image denoising", - inputs = gr.inputs.Image(image_mode='L', shape=(28, 28)), - outputs = gr.outputs.Image(type = 'pil'), - examples = examples, - article = "Author: Vivek Rai. Based on the keras example from Santiago L. Valdarrama \n Model Link: https://huggingface.co/keras-io/conv_autoencoder", - ).launch(enable_queue=True, debug = True) \ No newline at end of file diff --git a/spaces/keras-io/what-convnets-learn/app.py b/spaces/keras-io/what-convnets-learn/app.py deleted file mode 100644 index 7d29bad7c8fc26c63e007cf43ceda5c438c83a07..0000000000000000000000000000000000000000 --- a/spaces/keras-io/what-convnets-learn/app.py +++ /dev/null @@ -1,158 +0,0 @@ -# Imports -import numpy as np -import matplotlib.pyplot as plt - -import tensorflow as tf -from tensorflow import keras - -import streamlit as st - -from app_utils import * - -# The functions (except main) are taken straight from Keras Example -def compute_loss(feature_extractor, input_image, filter_index): - activation = feature_extractor(input_image) - # We avoid border artifacts by only involving non-border pixels in the loss. 
- filter_activation = activation[:, 2:-2, 2:-2, filter_index] - return tf.reduce_mean(filter_activation) - - -@tf.function -def gradient_ascent_step(feature_extractor, img, filter_index, learning_rate): - with tf.GradientTape() as tape: - tape.watch(img) - loss = compute_loss(feature_extractor, img, filter_index) - # Compute gradients. - grads = tape.gradient(loss, img) - # Normalize gradients. - grads = tf.math.l2_normalize(grads) - img += learning_rate * grads - return loss, img - - -def initialize_image(): - # We start from a gray image with some random noise - img = tf.random.uniform((1, IMG_WIDTH, IMG_HEIGHT, 3)) - # ResNet50V2 expects inputs in the range [-1, +1]. - # Here we scale our random inputs to [-0.125, +0.125] - return (img - 0.5) * 0.25 - - -def visualize_filter(feature_extractor, filter_index): - # We run gradient ascent for 20 steps - img = initialize_image() - for _ in range(ITERATIONS): - loss, img = gradient_ascent_step( - feature_extractor, img, filter_index, LEARNING_RATE - ) - - # Decode the resulting input image - img = deprocess_image(img[0].numpy()) - return loss, img - - -def deprocess_image(img): - # Normalize array: center on 0., ensure variance is 0.15 - img -= img.mean() - img /= img.std() + 1e-5 - img *= 0.15 - - # Center crop - img = img[25:-25, 25:-25, :] - - # Clip to [0, 1] - img += 0.5 - img = np.clip(img, 0, 1) - - # Convert to RGB array - img *= 255 - img = np.clip(img, 0, 255).astype("uint8") - return img - - -# The visualization function -def main(): - # Initialize states - initialize_states() - - # Model selector - mn_option = st.selectbox("Select the model for visualization -", AVAILABLE_MODELS) - - # Check to not load the model for ever layer change - if mn_option != st.session_state.model_name: - model = getattr(keras.applications, mn_option)( - weights="imagenet", include_top=False - ) - st.session_state.layer_list = ["": - if ln_option != st.session_state.layer_name: - layer = st.session_state.model.get_layer(name=ln_option) - st.session_state.feat_extract = keras.Model( - inputs=st.session_state.model.inputs, outputs=layer.output - ) - st.session_state.layer_name = ln_option - - # Filter index selector - if st.session_state.layer_name: - warn_ph = st.empty() - layer_ph = st.empty() - - filter_select = st.selectbox("Visualize -", VIS_OPTION.keys()) - - if VIS_OPTION[filter_select] == 0: - loss, img = visualize_filter(st.session_state.feat_extract, 0) - st.image(img) - else: - layer = st.session_state.model.get_layer(name=st.session_state.layer_name) - num_filters = layer.get_output_at(0).get_shape().as_list()[-1] - - warn_ph.warning( - ":exclamation: Calculating the gradients can take a while.." - ) - if num_filters < 64: - layer_ph.info( - f"{st.session_state.layer_name} has only {num_filters} filters, visualizing only those filters.." 
- ) - - prog_bar = st.progress(0) - fig, axis = plt.subplots(nrows=8, ncols=8, figsize=(14, 14)) - for filter_index, ax in enumerate(axis.ravel()[: min(num_filters, 64)]): - prog_bar.progress((filter_index + 1) / min(num_filters, 64)) - loss, img = visualize_filter( - st.session_state.feat_extract, filter_index - ) - ax.imshow(img) - ax.set_title(filter_index + 1) - ax.set_axis_off() - else: - for ax in axis.ravel()[num_filters:]: - ax.set_axis_off() - - st.write(fig) - warn_ph.empty() - - -if __name__ == "__main__": - - with open("model_names.txt", "r") as op: - AVAILABLE_MODELS = [i.strip() for i in op.readlines()] - - st.set_page_config(layout="wide") - - st.title(title) - st.write(info_text) - st.info(f"{credits}\n\n{replicate}\n\n{vit_info}") - st.write(self_credit) - - main() diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/options/inference_options.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/options/inference_options.py deleted file mode 100644 index c453965959ab4cfb31acbc424f994db68c3d4df5..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/options/inference_options.py +++ /dev/null @@ -1,23 +0,0 @@ -from face3d.options.base_options import BaseOptions - - -class InferenceOptions(BaseOptions): - """This class includes test options. - - It also includes shared options defined in BaseOptions. - """ - - def initialize(self, parser): - parser = BaseOptions.initialize(self, parser) # define shared options - parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') - parser.add_argument('--dataset_mode', type=str, default=None, help='chooses how datasets are loaded. [None | flist]') - - parser.add_argument('--input_dir', type=str, help='the folder of the input files') - parser.add_argument('--keypoint_dir', type=str, help='the folder of the keypoint files') - parser.add_argument('--output_dir', type=str, default='mp4', help='the output dir to save the extracted coefficients') - parser.add_argument('--save_split_files', action='store_true', help='save split files or not') - parser.add_argument('--inference_batch_size', type=int, default=8) - - # Dropout and Batchnorm has different behavior during training and test. - self.isTrain = False - return parser diff --git a/spaces/kevinwang676/Voice-Cloning-for-YouTube/Makefile b/spaces/kevinwang676/Voice-Cloning-for-YouTube/Makefile deleted file mode 100644 index ad23323414bd2175956f6aef92f223a02f7258be..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Voice-Cloning-for-YouTube/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -.PHONY: quality style - -# Check that source code meets quality standards -quality: - black --check --diff . - ruff . - -# Format source code automatically -style: - black . - ruff . 
--fix diff --git a/spaces/khaclinh/self-driving-anonymization/app.py b/spaces/khaclinh/self-driving-anonymization/app.py deleted file mode 100644 index f100e15128427bb2c3d037ac678128aa72a03227..0000000000000000000000000000000000000000 --- a/spaces/khaclinh/self-driving-anonymization/app.py +++ /dev/null @@ -1,133 +0,0 @@ -# Author: khaclinh -import os - -os.system('pip install yolox') - -import gradio as gr -import torch -import numpy as np -from PIL import Image -import importlib - -import cv2 - -from yolox.utils import postprocess -from yolox.data.data_augment import ValTransform - -ckpt_file = 'model_weights/best_ckpt.pth' - -# get YOLOX experiment -current_exp = importlib.import_module('pp4av_exp') -exp = current_exp.Exp() - -# set inference parameters -test_size = (800, 800) -num_classes = 2 -nmsthre = 0.3 - -GDPR_CLASSES = ( - "Face", - "Plate" -) - - -# get YOLOX model -model = exp.get_model() -#model.cuda() -model.eval() - -# get custom trained checkpoint -ckpt = torch.load(ckpt_file, map_location="cpu") -model.load_state_dict(ckpt["model"]) - - -def yolox_inference(img, model, prob_threshold, test_size): - bboxes = [] - bbclasses = [] - scores = [] - - preproc = ValTransform(legacy = False) - - tensor_img, _ = preproc(img, None, test_size) - tensor_img = torch.from_numpy(tensor_img).unsqueeze(0) - tensor_img = tensor_img.float() - #tensor_img = tensor_img.cuda() - - with torch.no_grad(): - outputs = model(tensor_img) - outputs = postprocess( - outputs, num_classes, prob_threshold, - nmsthre, class_agnostic=True - ) - - if outputs[0] is None: - return [], [], [] - - outputs = outputs[0].cpu() - bboxes = outputs[:, 0:4] - - bboxes /= min(test_size[0] / img.shape[0], test_size[1] / img.shape[1]) - bbclasses = outputs[:, 6] - scores = outputs[:, 4] * outputs[:, 5] - - return bboxes, bbclasses, scores - - -def draw_yolox_predictions(img, bboxes, scores, bbclasses, prob_threshold, classes_dict): - for i in range(len(bboxes)): - box = bboxes[i] - cls_id = int(bbclasses[i]) - score = scores[i] - if score < prob_threshold: - continue - x0 = int(box[0]) - y0 = int(box[1]) - x1 = int(box[2]) - y1 = int(box[3]) - if cls_id == 0: - - cv2.rectangle(img, (x0, y0), (x1, y1), (0, 255, 0), 2) - cv2.putText(img, '{}:{:.1f}%'.format(classes_dict[cls_id], score * 100), (x0, y0 - 3), cv2.FONT_HERSHEY_PLAIN, 0.8, (0,255,0), thickness = 1) - else: - cv2.rectangle(img, (x0, y0), (x1, y1), (255, 0, 0), 2) - cv2.putText(img, '{}:{:.1f}%'.format(classes_dict[cls_id], score * 100), (x0, y0 - 3), cv2.FONT_HERSHEY_PLAIN, 0.8, (255,0,0), thickness = 1) - - - return img - - -def pp4av_detect(img, prob_threshold=0.1): - # Convert PIL image to CV2 - open_cv_image = np.array(img) - # Convert RGB to BGR - open_cv_image = open_cv_image[:, :, ::-1].copy() - - bboxes, bbclasses, scores = yolox_inference(open_cv_image, model, prob_threshold, test_size) - - out = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB) - # Draw predictions - out_image = draw_yolox_predictions(out, bboxes, scores, bbclasses, prob_threshold, GDPR_CLASSES) - - return Image.fromarray(out_image) - - -img_input = gr.inputs.Image(type='pil', label="Original Image") -img_output = gr.outputs.Image(type="pil", label="Output Image") - -prob_threshold_slider = gr.Slider(minimum=0, maximum=1.0, step=0.01, value=0.1, label="Confidence Threshold") - -title = "PP4AV: Deep Learning model for Data Anonymization in Autonomous Driving" -description = "Detecting faces and license plates in image data from self-driving cars. 
Take a picture, upload an image, or click an example image to use." -article = "" - -examples = [['data/fisheye.jpg'], ['data/zurich.jpg'], ['data/stuttgart.jpg'], ['data/strasbourg.jpg']] -gr.Interface( - fn = pp4av_detect, - inputs = [img_input, prob_threshold_slider], - outputs = img_output, - title = title, - description = description, - article = article, - examples = examples, - theme = "huggingface" -).launch(enable_queue=True) diff --git a/spaces/kidcoconut/spcstm_omdenasaudi_liverhccxai/routes/api/rte_wsi.py b/spaces/kidcoconut/spcstm_omdenasaudi_liverhccxai/routes/api/rte_wsi.py deleted file mode 100644 index 1176222931bc5fb05190a6770eeab3477421f32d..0000000000000000000000000000000000000000 --- a/spaces/kidcoconut/spcstm_omdenasaudi_liverhccxai/routes/api/rte_wsi.py +++ /dev/null @@ -1,56 +0,0 @@ -from fastapi import APIRouter, Request, Response -from fastapi.responses import HTMLResponse - - -import main as libMain -from lib import utils as libUtils - - -m_kstrFile = __file__ -m_blnTraceOn = True - -m_kstrPath_templ = libUtils.pth_templ - - -rteWsi = APIRouter() - - -#--- -@rteWsi.get('/') -def api_wsi(): - return { - "message": "wsi api endpoint - welcome to the endpoint for wsi image processing" - } - - -#--- -@rteWsi.get('/upload') -def api_wsiUpload(): - ''' - process a single uploaded WSI image (from external app path) - - cleanup all old WSI images in /data/wsi/raw - - save upload to /data/wsi/raw - - create wsi class obj; capture file path, size, zoomMagnif, etc - - return(s) json - - ack wsi upload with info/attribs - ''' - return { - "message": "wsiUpload endpoint - file processing of one uploaded wsi image" - } - - -#--- -@rteWsi.get('/chunk') -def api_wsiChunk(): - ''' - process a single WSI image (from internal app path) - - create wsi class obj; capture file path, size, zoomMagnif, etc - - kick off tile chunking process; - - save tiles to /data/tiles/raw - - return(s) json - - ack wsi upload with info/attribs - - ack of tiles created: total count; names, paths, attribs (dimensions) - ''' - return { - "message": "wsiLoad endpoint - for chunking of wsi image to one or more tiles" - } diff --git a/spaces/king007/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_seg.py b/spaces/king007/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_seg.py deleted file mode 100644 index dfbda0744cc674a5b3195e47d7a507192f999eb2..0000000000000000000000000000000000000000 --- a/spaces/king007/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_seg.py +++ /dev/null @@ -1,353 +0,0 @@ -import gradio as gr -import numpy as np -import torch -from diffusers import ControlNetModel, StableDiffusionControlNetPipeline -from PIL import Image -from transformers import AutoImageProcessor, UperNetForSemanticSegmentation - -from diffusion_webui.utils.model_list import stable_model_list -from diffusion_webui.utils.scheduler_list import ( - SCHEDULER_LIST, - get_scheduler_list, -) - - -def ade_palette(): - """ADE20K palette that maps each class to RGB values.""" - return [ - [120, 120, 120], - [180, 120, 120], - [6, 230, 230], - [80, 50, 50], - [4, 200, 3], - [120, 120, 80], - [140, 140, 140], - [204, 5, 255], - [230, 230, 230], - [4, 250, 7], - [224, 5, 255], - [235, 255, 7], - [150, 5, 61], - [120, 120, 70], - [8, 255, 51], - [255, 6, 82], - [143, 255, 140], - [204, 255, 4], - [255, 51, 7], - [204, 70, 3], - [0, 102, 200], - [61, 230, 250], - [255, 6, 51], - [11, 102, 255], - [255, 7, 71], 
- [255, 9, 224], - [9, 7, 230], - [220, 220, 220], - [255, 9, 92], - [112, 9, 255], - [8, 255, 214], - [7, 255, 224], - [255, 184, 6], - [10, 255, 71], - [255, 41, 10], - [7, 255, 255], - [224, 255, 8], - [102, 8, 255], - [255, 61, 6], - [255, 194, 7], - [255, 122, 8], - [0, 255, 20], - [255, 8, 41], - [255, 5, 153], - [6, 51, 255], - [235, 12, 255], - [160, 150, 20], - [0, 163, 255], - [140, 140, 140], - [250, 10, 15], - [20, 255, 0], - [31, 255, 0], - [255, 31, 0], - [255, 224, 0], - [153, 255, 0], - [0, 0, 255], - [255, 71, 0], - [0, 235, 255], - [0, 173, 255], - [31, 0, 255], - [11, 200, 200], - [255, 82, 0], - [0, 255, 245], - [0, 61, 255], - [0, 255, 112], - [0, 255, 133], - [255, 0, 0], - [255, 163, 0], - [255, 102, 0], - [194, 255, 0], - [0, 143, 255], - [51, 255, 0], - [0, 82, 255], - [0, 255, 41], - [0, 255, 173], - [10, 0, 255], - [173, 255, 0], - [0, 255, 153], - [255, 92, 0], - [255, 0, 255], - [255, 0, 245], - [255, 0, 102], - [255, 173, 0], - [255, 0, 20], - [255, 184, 184], - [0, 31, 255], - [0, 255, 61], - [0, 71, 255], - [255, 0, 204], - [0, 255, 194], - [0, 255, 82], - [0, 10, 255], - [0, 112, 255], - [51, 0, 255], - [0, 194, 255], - [0, 122, 255], - [0, 255, 163], - [255, 153, 0], - [0, 255, 10], - [255, 112, 0], - [143, 255, 0], - [82, 0, 255], - [163, 255, 0], - [255, 235, 0], - [8, 184, 170], - [133, 0, 255], - [0, 255, 92], - [184, 0, 255], - [255, 0, 31], - [0, 184, 255], - [0, 214, 255], - [255, 0, 112], - [92, 255, 0], - [0, 224, 255], - [112, 224, 255], - [70, 184, 160], - [163, 0, 255], - [153, 0, 255], - [71, 255, 0], - [255, 0, 163], - [255, 204, 0], - [255, 0, 143], - [0, 255, 235], - [133, 255, 0], - [255, 0, 235], - [245, 0, 255], - [255, 0, 122], - [255, 245, 0], - [10, 190, 212], - [214, 255, 0], - [0, 204, 255], - [20, 0, 255], - [255, 255, 0], - [0, 153, 255], - [0, 41, 255], - [0, 255, 204], - [41, 0, 255], - [41, 255, 0], - [173, 0, 255], - [0, 245, 255], - [71, 0, 255], - [122, 0, 255], - [0, 255, 184], - [0, 92, 255], - [184, 255, 0], - [0, 133, 255], - [255, 214, 0], - [25, 194, 194], - [102, 255, 0], - [92, 0, 255], - ] - - -class StableDiffusionControlNetSegGenerator: - def __init__(self): - self.pipe = None - - def load_model( - self, - stable_model_path, - scheduler, - ): - - if self.pipe is None: - controlnet = ControlNetModel.from_pretrained( - "lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16 - ) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - pretrained_model_name_or_path=stable_model_path, - controlnet=controlnet, - safety_checker=None, - torch_dtype=torch.float16, - ) - - self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler) - self.pipe.to("cuda") - self.pipe.enable_xformers_memory_efficient_attention() - - return self.pipe - - def controlnet_seg(self, image_path: str): - image_processor = AutoImageProcessor.from_pretrained( - "openmmlab/upernet-convnext-small" - ) - image_segmentor = UperNetForSemanticSegmentation.from_pretrained( - "openmmlab/upernet-convnext-small" - ) - - image = Image.open(image_path).convert("RGB") - pixel_values = image_processor(image, return_tensors="pt").pixel_values - - with torch.no_grad(): - outputs = image_segmentor(pixel_values) - - seg = image_processor.post_process_semantic_segmentation( - outputs, target_sizes=[image.size[::-1]] - )[0] - - color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) - palette = np.array(ade_palette()) - - for label, color in enumerate(palette): - color_seg[seg == label, :] = color - - color_seg = 
color_seg.astype(np.uint8) - image = Image.fromarray(color_seg) - - return image - - def generate_image( - self, - image_path: str, - model_path: str, - prompt: str, - negative_prompt: str, - num_images_per_prompt: int, - guidance_scale: int, - num_inference_step: int, - scheduler: str, - seed_generator: int, - ): - - image = self.controlnet_seg(image_path=image_path) - pipe = self.load_model( - stable_model_path=model_path, - scheduler=scheduler, - ) - if seed_generator == 0: - random_seed = torch.randint(0, 1000000, (1,)) - generator = torch.manual_seed(random_seed) - else: - generator = torch.manual_seed(seed_generator) - - output = pipe( - prompt=prompt, - image=image, - negative_prompt=negative_prompt, - num_images_per_prompt=num_images_per_prompt, - num_inference_steps=num_inference_step, - guidance_scale=guidance_scale, - generator=generator, - ).images - - return output - - def app(): - with gr.Blocks(): - with gr.Row(): - with gr.Column(): - controlnet_seg_image_file = gr.Image( - type="filepath", label="Image" - ) - - controlnet_seg_prompt = gr.Textbox( - lines=1, - show_label=False, - placeholder="Prompt", - ) - - controlnet_seg_negative_prompt = gr.Textbox( - lines=1, - show_label=False, - placeholder="Negative Prompt", - ) - - with gr.Row(): - with gr.Column(): - controlnet_seg_model_id = gr.Dropdown( - choices=stable_model_list, - value=stable_model_list[0], - label="Stable Model Id", - ) - controlnet_seg_guidance_scale = gr.Slider( - minimum=0.1, - maximum=15, - step=0.1, - value=7.5, - label="Guidance Scale", - ) - - controlnet_seg_num_inference_step = gr.Slider( - minimum=1, - maximum=100, - step=1, - value=50, - label="Num Inference Step", - ) - - with gr.Row(): - with gr.Column(): - controlnet_seg_scheduler = gr.Dropdown( - choices=SCHEDULER_LIST, - value=SCHEDULER_LIST[0], - label="Scheduler", - ) - controlnet_seg_num_images_per_prompt = ( - gr.Slider( - minimum=1, - maximum=10, - step=1, - value=1, - label="Number Of Images", - ) - ) - controlnet_seg_seed_generator = gr.Slider( - minimum=0, - maximum=1000000, - step=1, - value=0, - label="Seed Generator", - ) - - controlnet_seg_predict = gr.Button(value="Generator") - - with gr.Column(): - output_image = gr.Gallery( - label="Generated images", - show_label=False, - elem_id="gallery", - ).style(grid=(1, 2)) - - controlnet_seg_predict.click( - fn=StableDiffusionControlNetSegGenerator().generate_image, - inputs=[ - controlnet_seg_image_file, - controlnet_seg_model_id, - controlnet_seg_prompt, - controlnet_seg_negative_prompt, - controlnet_seg_num_images_per_prompt, - controlnet_seg_guidance_scale, - controlnet_seg_num_inference_step, - controlnet_seg_scheduler, - controlnet_seg_seed_generator, - ], - outputs=[output_image], - ) diff --git a/spaces/kingabzpro/savtadepth/src/code/eval_metric_calculation.py b/spaces/kingabzpro/savtadepth/src/code/eval_metric_calculation.py deleted file mode 100644 index 9e791e6b7770b2891741f5a8af4d1d1b904009f4..0000000000000000000000000000000000000000 --- a/spaces/kingabzpro/savtadepth/src/code/eval_metric_calculation.py +++ /dev/null @@ -1,79 +0,0 @@ -import numpy as np -from PIL import Image -from tqdm import tqdm - -def compute_errors(target, prediction): - thresh = np.maximum((target / prediction), (prediction / target)) - a1 = (thresh < 1.25).mean() - a2 = (thresh < 1.25 ** 2).mean() - a3 = (thresh < 1.25 ** 3).mean() - - abs_rel = np.mean(np.abs(target - prediction) / target) - sq_rel = np.mean(((target - prediction) ** 2) / target) - - rmse = (target - prediction) ** 2 - rmse = 
np.sqrt(rmse.mean()) - - rmse_log = (np.log(target) - np.log(prediction)) ** 2 - rmse_log = np.sqrt(rmse_log.mean()) - - err = np.log(prediction) - np.log(target) - silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100 - - log_10 = (np.abs(np.log10(target) - np.log10(prediction))).mean() - - return a1, a2, a3, abs_rel, sq_rel, rmse, rmse_log, silog, log_10 - - -def compute_eval_metrics(test_files): - min_depth_eval = 1e-3 - max_depth_eval = 10 - - num_samples = len(test_files) - - a1 = np.zeros(num_samples, np.float32) - a2 = np.zeros(num_samples, np.float32) - a3 = np.zeros(num_samples, np.float32) - abs_rel = np.zeros(num_samples, np.float32) - sq_rel = np.zeros(num_samples, np.float32) - rmse = np.zeros(num_samples, np.float32) - rmse_log = np.zeros(num_samples, np.float32) - silog = np.zeros(num_samples, np.float32) - log10 = np.zeros(num_samples, np.float32) - - for i in tqdm(range(num_samples), desc="Calculating metrics for test data", total=num_samples): - sample_path = test_files[i] - target_path = str(sample_path.parent/(sample_path.stem + "_depth.png")) - pred_path = "src/eval/" + str(sample_path.stem) + "_pred.png" - - target_image = Image.open(target_path) - pred_image = Image.open(pred_path) - - target = np.asarray(target_image) - pred = np.asarray(pred_image) - - target = target / 25.0 - pred = pred / 25.0 - - pred[pred < min_depth_eval] = min_depth_eval - pred[pred > max_depth_eval] = max_depth_eval - pred[np.isinf(pred)] = max_depth_eval - - target[np.isinf(target)] = 0 - target[np.isnan(target)] = 0 - - valid_mask = np.logical_and(target > min_depth_eval, target < max_depth_eval) - - a1[i], a2[i], a3[i], abs_rel[i], sq_rel[i], rmse[i], rmse_log[i], silog[i], log10[i] = \ - compute_errors(target[valid_mask], pred[valid_mask]) - - print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format( - 'd1', 'd2', 'd3', 'AbsRel', 'SqRel', 'RMSE', 'RMSElog', 'SILog', 'log10')) - print("{:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}".format( - a1.mean(), a2.mean(), a3.mean(), - abs_rel.mean(), sq_rel.mean(), rmse.mean(), rmse_log.mean(), silog.mean(), log10.mean())) - - return dict(a1=a1.mean(), a2=a2.mean(), a3=a3.mean(), - abs_rel=abs_rel.mean(), sq_rel=sq_rel.mean(), - rmse=rmse.mean(), rmse_log=rmse_log.mean(), - log10=log10.mean(), silog=silog.mean()) diff --git a/spaces/kohrisatou-infinity/KIP_01_beta/train.py b/spaces/kohrisatou-infinity/KIP_01_beta/train.py deleted file mode 100644 index 97557410edb18717b0330c602fbaa9984f647b13..0000000000000000000000000000000000000000 --- a/spaces/kohrisatou-infinity/KIP_01_beta/train.py +++ /dev/null @@ -1,281 +0,0 @@ -import logging -logging.getLogger('matplotlib').setLevel(logging.WARNING) -import os -import json -import argparse -import itertools -import math -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler - -import commons -import utils -from data_utils import TextAudioSpeakerLoader, EvalDataLoader -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, -) -from losses import ( - kl_loss, - generator_loss, discriminator_loss, feature_loss -) - -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch - -torch.backends.cudnn.benchmark = True -global_step = 0 - - 
-# os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO' - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." - hps = utils.get_hparams() - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = hps.train.port - - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps) - train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True, - batch_size=hps.train.batch_size) - if rank == 0: - eval_dataset = EvalDataLoader(hps.data.validation_files, hps) - eval_loader = DataLoader(eval_dataset, num_workers=1, shuffle=False, - batch_size=1, pin_memory=False, - drop_last=False) - - net_g = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model).cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - net_g = DDP(net_g, device_ids=[rank]) # , find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank]) - - try: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d) - global_step = (epoch_str - 1) * len(train_loader) - except: - epoch_str = 1 - global_step = 0 - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, - [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, - [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d = nets - optim_g, optim_d = optims - scheduler_g, scheduler_d = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - # train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - for batch_idx, items in enumerate(train_loader): - c, f0, spec, y, spk = items - g = spk.cuda(rank, non_blocking=True) - spec, y = spec.cuda(rank, non_blocking=True), y.cuda(rank, non_blocking=True) - c = c.cuda(rank, non_blocking=True) - f0 = 
f0.cuda(rank, non_blocking=True) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - - with autocast(enabled=hps.train.fp16_run): - y_hat, ids_slice, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(c, f0, spec, g=g, mel=mel) - - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - with autocast(enabled=False): - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl}) - - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - } - - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict - ) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - with torch.no_grad(): - for batch_idx, items in enumerate(eval_loader): - c, f0, spec, y, spk = items - g = spk[:1].cuda(0) - spec, y = spec[:1].cuda(0), y[:1].cuda(0) - c = c[:1].cuda(0) - f0 = f0[:1].cuda(0) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat = generator.module.infer(c, f0, g=g, mel=mel) - - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - audio_dict.update({ - f"gen/audio_{batch_idx}": y_hat[0], - f"gt/audio_{batch_idx}": y[0] - }) - image_dict.update({ - f"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()), - "gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy()) - }) - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - - -if __name__ == "__main__": - main() diff --git a/spaces/koyomimi/Real-CUGAN/README.md b/spaces/koyomimi/Real-CUGAN/README.md deleted file mode 100644 index d673114edadba73e80f33a3c71bc0dbee8758cc8..0000000000000000000000000000000000000000 --- a/spaces/koyomimi/Real-CUGAN/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Real CUGAN -emoji: 🐢 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -license: gpl-3.0 -duplicated_from: DianXian/Real-CUGAN ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/Hdf5StubImagePlugin.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/Hdf5StubImagePlugin.py deleted file mode 100644 index 
bba05ed65a72c6b859f1722cefd0c75a59c43a37..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/Hdf5StubImagePlugin.py +++ /dev/null @@ -1,73 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# HDF5 stub adapter -# -# Copyright (c) 2000-2003 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -from . import Image, ImageFile - -_handler = None - - -def register_handler(handler): - """ - Install application-specific HDF5 image handler. - - :param handler: Handler object. - """ - global _handler - _handler = handler - - -# -------------------------------------------------------------------- -# Image adapter - - -def _accept(prefix): - return prefix[:8] == b"\x89HDF\r\n\x1a\n" - - -class HDF5StubImageFile(ImageFile.StubImageFile): - format = "HDF5" - format_description = "HDF5" - - def _open(self): - offset = self.fp.tell() - - if not _accept(self.fp.read(8)): - msg = "Not an HDF file" - raise SyntaxError(msg) - - self.fp.seek(offset) - - # make something up - self.mode = "F" - self._size = 1, 1 - - loader = self._load() - if loader: - loader.open(self) - - def _load(self): - return _handler - - -def _save(im, fp, filename): - if _handler is None or not hasattr(_handler, "save"): - msg = "HDF5 save handler not installed" - raise OSError(msg) - _handler.save(im, fp, filename) - - -# -------------------------------------------------------------------- -# Registry - -Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept) -Image.register_save(HDF5StubImageFile.format, _save) - -Image.register_extensions(HDF5StubImageFile.format, [".h5", ".hdf"]) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/cli.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/cli.py deleted file mode 100644 index aa8e8b9b099adbde4cee9f683feaaa5023895120..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/cli.py +++ /dev/null @@ -1,14 +0,0 @@ -import sys - -import gradio.deploy_space -import gradio.reload - - -def cli(): - args = sys.argv[1:] - if len(args) == 0: - raise ValueError("No file specified.") - if args[0] == "deploy": - gradio.deploy_space.deploy() - else: - gradio.reload.main() diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/BlockLabel-95be8dd1.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/BlockLabel-95be8dd1.js deleted file mode 100644 index af53998d7e38019a0c0a38a338ec08d21a5699ae..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/BlockLabel-95be8dd1.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as b,i as h,s as g,G as m,e as d,H as w,I,C as c,M as r,g as y,E as _,m as k,K as j,p as q,t as B,q as C,n as S}from"./index-7c0e54a6.js";import{g as E}from"./Button-661a0701.js";function G(a){let e,n,s,u,f,l;return s=new 
a[1]({}),{c(){e=m("div"),n=m("span"),d(s.$$.fragment),u=w(),f=I(a[0]),c(n,"class","svelte-1sohkj6"),c(e,"style",a[4]),c(e,"class","svelte-1sohkj6"),r(e,"hide",!a[2]),r(e,"sr-only",!a[2]),r(e,"float",a[3])},m(t,o){y(t,e,o),_(e,n),k(s,n,null),_(e,u),_(e,f),l=!0},p(t,[o]){(!l||o&1)&&j(f,t[0]),(!l||o&16)&&c(e,"style",t[4]),(!l||o&4)&&r(e,"hide",!t[2]),(!l||o&4)&&r(e,"sr-only",!t[2]),(!l||o&8)&&r(e,"float",t[3])},i(t){l||(q(s.$$.fragment,t),l=!0)},o(t){B(s.$$.fragment,t),l=!1},d(t){t&&C(e),S(s)}}}function H(a,e,n){let s,{label:u=null}=e,{Icon:f}=e,{show_label:l=!0}=e,{disable:t=!1}=e,{float:o=!0}=e;return a.$$set=i=>{"label"in i&&n(0,u=i.label),"Icon"in i&&n(1,f=i.Icon),"show_label"in i&&n(2,l=i.show_label),"disable"in i&&n(5,t=i.disable),"float"in i&&n(3,o=i.float)},a.$$.update=()=>{a.$$.dirty&32&&n(4,{styles:s}=E({label_container:!t},["label_container"]),s)},[u,f,l,o,s,t]}class M extends b{constructor(e){super(),h(this,e,H,G,g,{label:0,Icon:1,show_label:2,disable:5,float:3})}}export{M as B}; -//# sourceMappingURL=BlockLabel-95be8dd1.js.map diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-ee29d65e.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-ee29d65e.js deleted file mode 100644 index 55c74601405f7372048ad51ac1c980edc744537f..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-ee29d65e.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as J,i as P,s as q,G as E,C as F,g as U,E as z,F as C,q as j,N as fe,r as G,I,H as T,M as W,J as oe,K as X,e as A,f as K,m as v,l as L,t as g,o as R,p as h,n as y,y as O,T as ue,b as _e,a as ce,k as me,V as be,X as de,Y as ge,Z as he,ah as ke,x as we,$ as pe,h as Ae,j as ve}from"./index-7c0e54a6.js";import{B as ye}from"./Button-661a0701.js";import{B as ee}from"./BlockLabel-95be8dd1.js";import{E as Fe}from"./Empty-96265974.js";/* empty css */import{F as H}from"./File-61c5ba96.js";import{U as Be}from"./Upload-f28774c6.js";import{M as ze}from"./ModifyUpload-f9ffeaa8.js";import{n as Y,b as Ue}from"./ModifyUpload.svelte_svelte_type_style_lang-ba6baa96.js";import{U as je}from"./UploadText-cb8fda80.js";import"./Blocks-61158678.js";const Ee=t=>{let l=["B","KB","MB","GB","PB"],n=0;for(;t>1024;)t/=1024,n++;let e=l[n];return t.toFixed(1)+" "+e},Z=t=>{var l;return l=t.orig_name||t.name,l.length>30?`${l.substr(0,30)}...`:l},Q=t=>{var l=0;if(Array.isArray(t))for(var n of t)n.size!==void 0&&(l+=n.size);else l=t.size||0;return Ee(l)};function $(t,l,n){const e=t.slice();return e[4]=l[n],e[6]=n,e}function Ne(t){let l;return{c(){l=I("Uploading...")},m(n,e){U(n,l,e)},p:C,d(n){n&&j(l)}}}function Te(t){let l,n,e,a;return{c(){l=E("a"),n=I("Download"),F(l,"href",e=t[4].data),F(l,"target","_blank"),F(l,"download",a=window.__is_colab__?null:t[4].orig_name||t[4].name),F(l,"class","svelte-xrr240")},m(s,i){U(s,l,i),z(l,n)},p(s,i){i&1&&e!==(e=s[4].data)&&F(l,"href",e),i&1&&a!==(a=window.__is_colab__?null:s[4].orig_name||s[4].name)&&F(l,"download",a)},d(s){s&&j(l)}}}function x(t){let l,n,e=Z(t[4])+"",a,s,i,o=Q(t[4])+"",f,d,r,m,u,b;function B(c,p){return c[4].data?Te:Ne}let k=B(t),w=k(t);function S(){return t[3](t[4],t[6])}return{c(){l=E("tr"),n=E("td"),a=I(e),s=T(),i=E("td"),f=I(o),d=T(),r=E("td"),w.c(),m=T(),F(n,"class","svelte-xrr240"),F(i,"class","svelte-xrr240"),F(r,"class","download svelte-xrr240"),F(l,"class","file 
svelte-xrr240"),W(l,"selectable",t[1])},m(c,p){U(c,l,p),z(l,n),z(n,a),z(l,s),z(l,i),z(i,f),z(l,d),z(l,r),w.m(r,null),z(l,m),u||(b=oe(l,"click",S),u=!0)},p(c,p){t=c,p&1&&e!==(e=Z(t[4])+"")&&X(a,e),p&1&&o!==(o=Q(t[4])+"")&&X(f,o),k===(k=B(t))&&w?w.p(t,p):(w.d(1),w=k(t),w&&(w.c(),w.m(r,null))),p&2&&W(l,"selectable",t[1])},d(c){c&&j(l),w.d(),u=!1,b()}}}function Se(t){let l,n,e,a=Array.isArray(t[0])?t[0]:[t[0]],s=[];for(let i=0;ie("select",{value:o.orig_name||o.name,index:f});return t.$$set=o=>{"value"in o&&n(0,a=o.value),"selectable"in o&&n(1,s=o.selectable)},[a,s,e,i]}class le extends J{constructor(l){super(),P(this,l,Me,Se,q,{value:0,selectable:1})}}function Oe(t){let l,n;return l=new Fe({props:{size:"large",unpadded_box:!0,$$slots:{default:[Ie]},$$scope:{ctx:t}}}),{c(){A(l.$$.fragment)},m(e,a){v(l,e,a),n=!0},p(e,a){const s={};a&32&&(s.$$scope={dirty:a,ctx:e}),l.$set(s)},i(e){n||(h(l.$$.fragment,e),n=!0)},o(e){g(l.$$.fragment,e),n=!1},d(e){y(l,e)}}}function Ce(t){let l,n;return l=new le({props:{selectable:t[3],value:t[0]}}),l.$on("select",t[4]),{c(){A(l.$$.fragment)},m(e,a){v(l,e,a),n=!0},p(e,a){const s={};a&8&&(s.selectable=e[3]),a&1&&(s.value=e[0]),l.$set(s)},i(e){n||(h(l.$$.fragment,e),n=!0)},o(e){g(l.$$.fragment,e),n=!1},d(e){y(l,e)}}}function Ie(t){let l,n;return l=new H({}),{c(){A(l.$$.fragment)},m(e,a){v(l,e,a),n=!0},i(e){n||(h(l.$$.fragment,e),n=!0)},o(e){g(l.$$.fragment,e),n=!1},d(e){y(l,e)}}}function Je(t){let l,n,e,a,s,i;l=new ee({props:{show_label:t[2],float:t[0]===null,Icon:H,label:t[1]||"File"}});const o=[Ce,Oe],f=[];function d(r,m){return r[0]?0:1}return e=d(t),a=f[e]=o[e](t),{c(){A(l.$$.fragment),n=T(),a.c(),s=K()},m(r,m){v(l,r,m),U(r,n,m),f[e].m(r,m),U(r,s,m),i=!0},p(r,[m]){const u={};m&4&&(u.show_label=r[2]),m&1&&(u.float=r[0]===null),m&2&&(u.label=r[1]||"File"),l.$set(u);let b=e;e=d(r),e===b?f[e].p(r,m):(L(),g(f[b],1,1,()=>{f[b]=null}),R(),a=f[e],a?a.p(r,m):(a=f[e]=o[e](r),a.c()),h(a,1),a.m(s.parentNode,s))},i(r){i||(h(l.$$.fragment,r),h(a),i=!0)},o(r){g(l.$$.fragment,r),g(a),i=!1},d(r){y(l,r),r&&j(n),f[e].d(r),r&&j(s)}}}function Pe(t,l,n){let{value:e=null}=l,{label:a}=l,{show_label:s=!0}=l,{selectable:i=!1}=l;function o(f){O.call(this,t,f)}return t.$$set=f=>{"value"in f&&n(0,e=f.value),"label"in f&&n(1,a=f.label),"show_label"in f&&n(2,s=f.show_label),"selectable"in f&&n(3,i=f.selectable)},[e,a,s,i,o]}class qe extends J{constructor(l){super(),P(this,l,Pe,Je,q,{value:0,label:1,show_label:2,selectable:3})}}function De(t){let l,n,e;function a(i){t[12](i)}let s={filetype:t[6],parse_to_data_url:!1,file_count:t[3],$$slots:{default:[Ke]},$$scope:{ctx:t}};return t[5]!==void 0&&(s.dragging=t[5]),l=new Be({props:s}),_e.push(()=>ce(l,"dragging",a)),l.$on("load",t[7]),{c(){A(l.$$.fragment)},m(i,o){v(l,i,o),e=!0},p(i,o){const f={};o&64&&(f.filetype=i[6]),o&8&&(f.file_count=i[3]),o&8192&&(f.$$scope={dirty:o,ctx:i}),!n&&o&32&&(n=!0,f.dragging=i[5],me(()=>n=!1)),l.$set(f)},i(i){e||(h(l.$$.fragment,i),e=!0)},o(i){g(l.$$.fragment,i),e=!1},d(i){y(l,i)}}}function Ge(t){let l,n,e,a;return l=new ze({props:{absolute:!0}}),l.$on("clear",t[8]),e=new le({props:{selectable:t[4],value:t[0]}}),e.$on("select",t[11]),{c(){A(l.$$.fragment),n=T(),A(e.$$.fragment)},m(s,i){v(l,s,i),U(s,n,i),v(e,s,i),a=!0},p(s,i){const o={};i&16&&(o.selectable=s[4]),i&1&&(o.value=s[0]),e.$set(o)},i(s){a||(h(l.$$.fragment,s),h(e.$$.fragment,s),a=!0)},o(s){g(l.$$.fragment,s),g(e.$$.fragment,s),a=!1},d(s){y(l,s),s&&j(n),y(e,s)}}}function Ke(t){let l;const 
n=t[10].default,e=be(n,t,t[13],null);return{c(){e&&e.c()},m(a,s){e&&e.m(a,s),l=!0},p(a,s){e&&e.p&&(!l||s&8192)&&de(e,n,a,a[13],l?he(n,a[13],s,null):ge(a[13]),null)},i(a){l||(h(e,a),l=!0)},o(a){g(e,a),l=!1},d(a){e&&e.d(a)}}}function Le(t){let l,n,e,a,s,i;l=new ee({props:{show_label:t[2],Icon:H,float:t[0]===null,label:t[1]||"File"}});const o=[Ge,De],f=[];function d(r,m){return r[0]?0:1}return e=d(t),a=f[e]=o[e](t),{c(){A(l.$$.fragment),n=T(),a.c(),s=K()},m(r,m){v(l,r,m),U(r,n,m),f[e].m(r,m),U(r,s,m),i=!0},p(r,[m]){const u={};m&4&&(u.show_label=r[2]),m&1&&(u.float=r[0]===null),m&2&&(u.label=r[1]||"File"),l.$set(u);let b=e;e=d(r),e===b?f[e].p(r,m):(L(),g(f[b],1,1,()=>{f[b]=null}),R(),a=f[e],a?a.p(r,m):(a=f[e]=o[e](r),a.c()),h(a,1),a.m(s.parentNode,s))},i(r){i||(h(l.$$.fragment,r),h(a),i=!0)},o(r){g(l.$$.fragment,r),g(a),i=!1},d(r){y(l,r),r&&j(n),f[e].d(r),r&&j(s)}}}function Re(t,l,n){let{$$slots:e={},$$scope:a}=l,{value:s}=l,{label:i}=l,{show_label:o=!0}=l,{file_count:f="single"}=l,{file_types:d=null}=l,{selectable:r=!1}=l;async function m({detail:c}){n(0,s=c),await ue(),b("change",s),b("upload",c)}function u({detail:c}){n(0,s=null),b("change",s),b("clear")}const b=G();let B;d==null?B=null:(d=d.map(c=>c.startsWith(".")?c:c+"/*"),B=d.join(", "));let k=!1;function w(c){O.call(this,t,c)}function S(c){k=c,n(5,k)}return t.$$set=c=>{"value"in c&&n(0,s=c.value),"label"in c&&n(1,i=c.label),"show_label"in c&&n(2,o=c.show_label),"file_count"in c&&n(3,f=c.file_count),"file_types"in c&&n(9,d=c.file_types),"selectable"in c&&n(4,r=c.selectable),"$$scope"in c&&n(13,a=c.$$scope)},t.$$.update=()=>{t.$$.dirty&32&&b("drag",k)},[s,i,o,f,r,k,B,m,u,d,e,w,S,a]}class He extends J{constructor(l){super(),P(this,l,Re,Le,q,{value:0,label:1,show_label:2,file_count:3,file_types:9,selectable:4})}}function Ve(t){let l,n;return l=new qe({props:{selectable:t[9],value:t[11],label:t[5],show_label:t[6]}}),l.$on("select",t[21]),{c(){A(l.$$.fragment)},m(e,a){v(l,e,a),n=!0},p(e,a){const s={};a&512&&(s.selectable=e[9]),a&2048&&(s.value=e[11]),a&32&&(s.label=e[5]),a&64&&(s.show_label=e[6]),l.$set(s)},i(e){n||(h(l.$$.fragment,e),n=!0)},o(e){g(l.$$.fragment,e),n=!1},d(e){y(l,e)}}}function We(t){let l,n;return l=new He({props:{label:t[5],show_label:t[6],value:t[11],file_count:t[7],file_types:t[8],selectable:t[9],$$slots:{default:[Xe]},$$scope:{ctx:t}}}),l.$on("change",t[17]),l.$on("drag",t[18]),l.$on("clear",t[19]),l.$on("select",t[20]),{c(){A(l.$$.fragment)},m(e,a){v(l,e,a),n=!0},p(e,a){const s={};a&32&&(s.label=e[5]),a&64&&(s.show_label=e[6]),a&2048&&(s.value=e[11]),a&128&&(s.file_count=e[7]),a&256&&(s.file_types=e[8]),a&512&&(s.selectable=e[9]),a&8388608&&(s.$$scope={dirty:a,ctx:e}),l.$set(s)},i(e){n||(h(l.$$.fragment,e),n=!0)},o(e){g(l.$$.fragment,e),n=!1},d(e){y(l,e)}}}function Xe(t){let l,n;return l=new je({props:{type:"file"}}),{c(){A(l.$$.fragment)},m(e,a){v(l,e,a),n=!0},p:C,i(e){n||(h(l.$$.fragment,e),n=!0)},o(e){g(l.$$.fragment,e),n=!1},d(e){y(l,e)}}}function Ye(t){let l,n,e,a,s,i;const o=[t[10],{status:t[13]?"generating":t[10]?.status||"complete"}];let f={};for(let u=0;u{r[k]=null}),R(),a=r[e],a?a.p(u,b):(a=r[e]=d[e](u),a.c()),h(a,1),a.m(s.parentNode,s))},i(u){i||(h(l.$$.fragment,u),h(a),i=!0)},o(u){g(l.$$.fragment,u),g(a),i=!1},d(u){y(l,u),u&&j(n),r[e].d(u),u&&j(s)}}}function Ze(t){let l,n;return l=new 
ye({props:{visible:t[3],variant:t[4]==="dynamic"&&t[0]===null?"dashed":"solid",border_mode:t[12]?"focus":"base",padding:!1,elem_id:t[1],elem_classes:t[2],$$slots:{default:[Ye]},$$scope:{ctx:t}}}),{c(){A(l.$$.fragment)},m(e,a){v(l,e,a),n=!0},p(e,[a]){const s={};a&8&&(s.visible=e[3]),a&17&&(s.variant=e[4]==="dynamic"&&e[0]===null?"dashed":"solid"),a&4096&&(s.border_mode=e[12]?"focus":"base"),a&2&&(s.elem_id=e[1]),a&4&&(s.elem_classes=e[2]),a&8404977&&(s.$$scope={dirty:a,ctx:e}),l.$set(s)},i(e){n||(h(l.$$.fragment,e),n=!0)},o(e){g(l.$$.fragment,e),n=!1},d(e){y(l,e)}}}function Qe(t,l,n){let e,{elem_id:a=""}=l,{elem_classes:s=[]}=l,{visible:i=!0}=l,{value:o}=l,f,{mode:d}=l,{root:r}=l,{label:m}=l,{show_label:u}=l,{file_count:b}=l,{file_types:B=["file"]}=l,{root_url:k}=l,{selectable:w=!1}=l,{loading_status:S}=l,c=!1,p=!1;const D=G(),te=({detail:_})=>n(0,o=_),ne=({detail:_})=>n(12,c=_);function ae(_){O.call(this,t,_)}function se(_){O.call(this,t,_)}function ie(_){O.call(this,t,_)}return t.$$set=_=>{"elem_id"in _&&n(1,a=_.elem_id),"elem_classes"in _&&n(2,s=_.elem_classes),"visible"in _&&n(3,i=_.visible),"value"in _&&n(0,o=_.value),"mode"in _&&n(4,d=_.mode),"root"in _&&n(14,r=_.root),"label"in _&&n(5,m=_.label),"show_label"in _&&n(6,u=_.show_label),"file_count"in _&&n(7,b=_.file_count),"file_types"in _&&n(8,B=_.file_types),"root_url"in _&&n(15,k=_.root_url),"selectable"in _&&n(9,w=_.selectable),"loading_status"in _&&n(10,S=_.loading_status)},t.$$.update=()=>{if(t.$$.dirty&49153&&n(11,e=Y(o,r,k)),t.$$.dirty&116753&&JSON.stringify(e)!==JSON.stringify(f)){if(n(16,f=e),e===null)D("change"),n(13,p=!1);else if(!(Array.isArray(e)?e:[e]).every(_=>_.blob))n(13,p=!1);else if(d==="dynamic"){let _=(Array.isArray(e)?e:[e]).map(M=>M.blob),re=e;n(13,p=!0),ke(r,_).then(M=>{re===e&&(n(13,p=!1),M.error?(Array.isArray(e)?e:[e]).forEach(async(N,V)=>{N.data=await Ue(N.blob)}):((Array.isArray(e)?e:[e]).forEach((N,V)=>{M.files&&(N.orig_name=N.name,N.name=M.files[V],N.is_file=!0)}),n(11,e=Y(o,r,k))),D("change"),D("upload"))})}}},[o,a,s,i,d,m,u,b,B,w,S,e,c,p,r,k,f,te,ne,ae,se,ie]}class $e extends J{constructor(l){super(),P(this,l,Qe,Ze,q,{elem_id:1,elem_classes:2,visible:3,value:0,mode:4,root:14,label:5,show_label:6,file_count:7,file_types:8,root_url:15,selectable:9,loading_status:10})}}const ul=$e,_l=["static","dynamic"],cl=t=>({type:{input_payload:"{ name: string; data: string }",response_object:"{ orig_name: string; name: string, size: number, data: string, is_file: boolean}"},description:{input_payload:"object with file name and base64 data",response_object:"object that includes path to file. The URL: {ROOT}file={name} contains the data"},example_data:{name:"zip.zip",data:"data:@file/octet-stream;base64,UEsFBgAAAAAAAAAAAAAAAAAAAAAAAA=="}});export{ul as Component,cl as document,_l as modes}; -//# sourceMappingURL=index-ee29d65e.js.map diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/pylab.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/pylab.py deleted file mode 100644 index 289aa9050e0ca11189f4289a81c98cf2f8cb273c..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/pylab.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -.. warning:: - Since heavily importing into the global namespace may result in unexpected - behavior, the use of pylab is strongly discouraged. Use `matplotlib.pyplot` - instead. 
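# A minimal sketch of the explicit, namespaced imports that the warning above
# recommends instead of pylab's star imports (illustrative only; not part of the
# original pylab.py — all names used are standard numpy/matplotlib API):
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 200)
plt.plot(x, np.sin(x))          # plot() reached through the pyplot namespace
plt.title("explicit imports instead of 'from pylab import *'")
plt.show()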
- -`pylab` is a module that includes `matplotlib.pyplot`, `numpy`, `numpy.fft`, -`numpy.linalg`, `numpy.random`, and some additional functions, all within -a single namespace. Its original purpose was to mimic a MATLAB-like way -of working by importing all functions into the global namespace. This is -considered bad style nowadays. -""" - -from matplotlib.cbook import flatten, silent_list - -import matplotlib as mpl - -from matplotlib.dates import ( - date2num, num2date, datestr2num, drange, DateFormatter, DateLocator, - RRuleLocator, YearLocator, MonthLocator, WeekdayLocator, DayLocator, - HourLocator, MinuteLocator, SecondLocator, rrule, MO, TU, WE, TH, FR, - SA, SU, YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, - relativedelta) - -# bring all the symbols in so folks can import them from -# pylab in one fell swoop - -## We are still importing too many things from mlab; more cleanup is needed. - -from matplotlib.mlab import ( - detrend, detrend_linear, detrend_mean, detrend_none, window_hanning, - window_none) - -from matplotlib import cbook, mlab, pyplot as plt -from matplotlib.pyplot import * - -from numpy import * -from numpy.fft import * -from numpy.random import * -from numpy.linalg import * - -import numpy as np -import numpy.ma as ma - -# don't let numpy's datetime hide stdlib -import datetime - -# This is needed, or bytes will be numpy.random.bytes from -# "from numpy.random import *" above -bytes = __import__("builtins").bytes diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_agg_filter.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_agg_filter.py deleted file mode 100644 index dc8cff6858ae52a85028b4f8b3266b4d1880f687..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_agg_filter.py +++ /dev/null @@ -1,33 +0,0 @@ -import numpy as np - -import matplotlib.pyplot as plt -from matplotlib.testing.decorators import image_comparison - - -@image_comparison(baseline_images=['agg_filter_alpha'], - extensions=['png', 'pdf']) -def test_agg_filter_alpha(): - # Remove this line when this test image is regenerated. - plt.rcParams['pcolormesh.snap'] = False - - ax = plt.axes() - x, y = np.mgrid[0:7, 0:8] - data = x**2 - y**2 - mesh = ax.pcolormesh(data, cmap='Reds', zorder=5) - - def manual_alpha(im, dpi): - im[:, :, 3] *= 0.6 - print('CALLED') - return im, 0, 0 - - # Note: Doing alpha like this is not the same as setting alpha on - # the mesh itself. Currently meshes are drawn as independent patches, - # and we see fine borders around the blocks of color. See the SO - # question for an example: https://stackoverflow.com/q/20678817/ - mesh.set_agg_filter(manual_alpha) - - # Currently we must enable rasterization for this to have an effect in - # the PDF backend. - mesh.set_rasterized(True) - - ax.plot([0, 4, 7], [1, 3, 8]) diff --git a/spaces/lIlIlllllmeng/QQsign1/unidbg-fetch-qsign/bin/unidbg-fetch-qsign.bat b/spaces/lIlIlllllmeng/QQsign1/unidbg-fetch-qsign/bin/unidbg-fetch-qsign.bat deleted file mode 100644 index 4e44bab8aa65d16e35e935f1273de2e98ce80cf9..0000000000000000000000000000000000000000 --- a/spaces/lIlIlllllmeng/QQsign1/unidbg-fetch-qsign/bin/unidbg-fetch-qsign.bat +++ /dev/null @@ -1,89 +0,0 @@ -@rem -@rem Copyright 2015 the original author or authors. 
-@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. -@rem - -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem unidbg-fetch-qsign startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME%.. - -@rem Resolve any "." and ".." in APP_HOME to make it shorter. -for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi - -@rem Add default JVM options here. You can also use JAVA_OPTS and UNIDBG_FETCH_QSIGN_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto execute - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto execute - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. 
- -goto fail - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\lib\unidbg-fetch-qsign-1.1.0.jar;%APP_HOME%\lib\unidbg-fix.jar;%APP_HOME%\lib\ktor-server-content-negotiation-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-json-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-netty-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-host-common-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-core-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-events-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-websockets-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-cio-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-network-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-utils-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-io-jvm-2.3.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk8-1.8.22.jar;%APP_HOME%\lib\kotlinx-serialization-json-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-protobuf-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-core-jvm-1.5.1.jar;%APP_HOME%\lib\logback-classic-1.2.11.jar;%APP_HOME%\lib\kotlinx-coroutines-jdk8-1.7.1.jar;%APP_HOME%\lib\kotlinx-coroutines-core-jvm-1.7.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk7-1.8.22.jar;%APP_HOME%\lib\kotlin-reflect-1.8.10.jar;%APP_HOME%\lib\kotlin-stdlib-1.8.22.jar;%APP_HOME%\lib\slf4j-api-1.7.36.jar;%APP_HOME%\lib\kotlin-stdlib-common-1.8.22.jar;%APP_HOME%\lib\config-1.4.2.jar;%APP_HOME%\lib\jansi-2.4.0.jar;%APP_HOME%\lib\netty-codec-http2-4.1.92.Final.jar;%APP_HOME%\lib\alpn-api-1.1.3.v20160715.jar;%APP_HOME%\lib\netty-transport-native-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-epoll-4.1.92.Final.jar;%APP_HOME%\lib\logback-core-1.2.11.jar;%APP_HOME%\lib\annotations-23.0.0.jar;%APP_HOME%\lib\netty-codec-http-4.1.92.Final.jar;%APP_HOME%\lib\netty-handler-4.1.92.Final.jar;%APP_HOME%\lib\netty-codec-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-epoll-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-unix-common-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-4.1.92.Final.jar;%APP_HOME%\lib\netty-buffer-4.1.92.Final.jar;%APP_HOME%\lib\netty-resolver-4.1.92.Final.jar;%APP_HOME%\lib\netty-common-4.1.92.Final.jar - - -@rem Execute unidbg-fetch-qsign -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %UNIDBG_FETCH_QSIGN_OPTS% -classpath "%CLASSPATH%" MainKt %* - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable UNIDBG_FETCH_QSIGN_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! 
-if not "" == "%UNIDBG_FETCH_QSIGN_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/spaces/lakshmi324/Vehicle_Damage_Detector/app.py b/spaces/lakshmi324/Vehicle_Damage_Detector/app.py deleted file mode 100644 index 1bba86a6ccaefc7619ad0cf4cf59d11163bdfd4f..0000000000000000000000000000000000000000 --- a/spaces/lakshmi324/Vehicle_Damage_Detector/app.py +++ /dev/null @@ -1,131 +0,0 @@ - -import pandas as pd - -from torchvision import transforms -from detecto import core -from detecto import utils -from detecto.visualize import show_labeled_image -from detecto.core import Model -import matplotlib.pyplot as plt -import matplotlib.image as img -import matplotlib.pyplot as plt -import gradio as gr -import os -from typing import List, Optional -import matplotlib.pyplot as plt -import numpy as np -from matplotlib import patches -from pathlib import Path - -transform_img = transforms.Compose([transforms.ToPILImage(), - transforms.Resize(400), - transforms.RandomHorizontalFlip(0.5), - transforms.ToTensor(), - utils.normalize_transform(),]) - - -labels = ['damage','BG'] -ROOT_DIR = os.getcwd() -path = ROOT_DIR+'/Trained_Model.pth' -print('path',path) -model = Model.load(path, labels) # CHange this while uploading - - - -def prediction_defect(input_image,model = model): - '''Function takes input of the damaged vehicle - and provides the damaged area of the vehicle - ''' - image = utils.read_image(input_image) - new_image = transform_img(image) - labels, boxes, scores = model.predict(image) - top = len(scores[scores > .5]) - - return plot_bboxes( input_image, bboxes= boxes[:top], - xywh=False, labels=labels[:top]) - - - -def plot_bboxes( - image_file: str, - bboxes: List[List[float]], - xywh: bool = True, - labels: Optional[List[str]] = None -) -> None: - """ - Args: - image_file: str specifying the image file path - bboxes: list of bounding box annotations for all the detections - xywh: bool, if True, the bounding box annotations are specified as - [xmin, ymin, width, height]. If False the annotations are specified as - [xmin, ymin, xmax, ymax]. If you are unsure what the mode is try both - and check the saved image to see which setting gives the - correct visualization. 
- - """ - fig = plt.figure() - - # add axes to the image - ax = fig.add_axes([0, 0, 1, 1]) - - image_folder = Path(image_file).parent - - # read and plot the image - image = plt.imread(image_file) - plt.imshow(image) - - # Iterate over all the bounding boxes - for i, bbox in enumerate(bboxes): - if xywh: - xmin, ymin, w, h = bbox - else: - xmin, ymin, xmax, ymax = bbox - w = xmax - xmin - h = ymax - ymin - - # add bounding boxes to the image - box = patches.Rectangle( - (xmin, ymin), w, h, edgecolor="red", facecolor="none" - ) - - ax.add_patch(box) - - if labels is not None: - rx, ry = box.get_xy() - cx = rx + box.get_width()/2.0 - cy = ry + box.get_height()/8.0 - l = ax.annotate( - labels[i], - (cx, cy), - fontsize=8, - fontweight="bold", - color="white", - ha='center', - va='center' - ) - l.set_bbox( - dict(facecolor='red', alpha=0.5, edgecolor='red') - ) - - plt.axis('off') - outfile = os.path.join(image_folder, "image_bbox.jpg") - fig.savefig(outfile) - - print("Saved image with detections to %s" % outfile) - return outfile - - -gr.Interface(fn=prediction_defect, - - inputs = [ gr.inputs.Image(type="filepath", label="Please Upload the Defect Image") ], - outputs= [gr.outputs.Image(type="pil")], - examples=[]).launch(debug= True) - - - - - - - - - diff --git a/spaces/leilaglewis/04-Gradio-SOTA/qasrl_model_pipeline.py b/spaces/leilaglewis/04-Gradio-SOTA/qasrl_model_pipeline.py deleted file mode 100644 index 50135f76849bc8537fcae83b72532da661487da6..0000000000000000000000000000000000000000 --- a/spaces/leilaglewis/04-Gradio-SOTA/qasrl_model_pipeline.py +++ /dev/null @@ -1,183 +0,0 @@ -from typing import Optional -import json -from argparse import Namespace -from pathlib import Path -from transformers import Text2TextGenerationPipeline, AutoModelForSeq2SeqLM, AutoTokenizer - -def get_markers_for_model(is_t5_model: bool) -> Namespace: - special_tokens_constants = Namespace() - if is_t5_model: - # T5 model have 100 special tokens by default - special_tokens_constants.separator_input_question_predicate = "" - special_tokens_constants.separator_output_answers = "" - special_tokens_constants.separator_output_questions = "" # if using only questions - special_tokens_constants.separator_output_question_answer = "" - special_tokens_constants.separator_output_pairs = "" - special_tokens_constants.predicate_generic_marker = "" - special_tokens_constants.predicate_verb_marker = "" - special_tokens_constants.predicate_nominalization_marker = "" - - else: - special_tokens_constants.separator_input_question_predicate = "" - special_tokens_constants.separator_output_answers = "" - special_tokens_constants.separator_output_questions = "" # if using only questions - special_tokens_constants.separator_output_question_answer = "" - special_tokens_constants.separator_output_pairs = "" - special_tokens_constants.predicate_generic_marker = "" - special_tokens_constants.predicate_verb_marker = "" - special_tokens_constants.predicate_nominalization_marker = "" - return special_tokens_constants - -def load_trained_model(name_or_path): - import huggingface_hub as HFhub - tokenizer = AutoTokenizer.from_pretrained(name_or_path) - model = AutoModelForSeq2SeqLM.from_pretrained(name_or_path) - # load preprocessing_kwargs from the model repo on HF hub, or from the local model directory - kwargs_filename = None - if name_or_path.startswith("kleinay/"): # and 'preprocessing_kwargs.json' in HFhub.list_repo_files(name_or_path): # the supported version of HFhub doesn't support list_repo_files - kwargs_filename = 
HFhub.hf_hub_download(repo_id=name_or_path, filename="preprocessing_kwargs.json") - elif Path(name_or_path).is_dir() and (Path(name_or_path) / "experiment_kwargs.json").exists(): - kwargs_filename = Path(name_or_path) / "experiment_kwargs.json" - - if kwargs_filename: - preprocessing_kwargs = json.load(open(kwargs_filename)) - # integrate into model.config (for decoding args, e.g. "num_beams"), and save also as standalone object for preprocessing - model.config.preprocessing_kwargs = Namespace(**preprocessing_kwargs) - model.config.update(preprocessing_kwargs) - return model, tokenizer - - -class QASRL_Pipeline(Text2TextGenerationPipeline): - def __init__(self, model_repo: str, **kwargs): - model, tokenizer = load_trained_model(model_repo) - super().__init__(model, tokenizer, framework="pt") - self.is_t5_model = "t5" in model.config.model_type - self.special_tokens = get_markers_for_model(self.is_t5_model) - self.data_args = model.config.preprocessing_kwargs - # backward compatibility - default keyword values implemeted in `run_summarization`, thus not saved in `preprocessing_kwargs` - if "predicate_marker_type" not in vars(self.data_args): - self.data_args.predicate_marker_type = "generic" - if "use_bilateral_predicate_marker" not in vars(self.data_args): - self.data_args.use_bilateral_predicate_marker = True - if "append_verb_form" not in vars(self.data_args): - self.data_args.append_verb_form = True - self._update_config(**kwargs) - - def _update_config(self, **kwargs): - " Update self.model.config with initialization parameters and necessary defaults. " - # set default values that will always override model.config, but can overriden by __init__ kwargs - kwargs["max_length"] = kwargs.get("max_length", 80) - # override model.config with kwargs - for k,v in kwargs.items(): - self.model.config.__dict__[k] = v - - def _sanitize_parameters(self, **kwargs): - preprocess_kwargs, forward_kwargs, postprocess_kwargs = {}, {}, {} - if "predicate_marker" in kwargs: - preprocess_kwargs["predicate_marker"] = kwargs["predicate_marker"] - if "predicate_type" in kwargs: - preprocess_kwargs["predicate_type"] = kwargs["predicate_type"] - if "verb_form" in kwargs: - preprocess_kwargs["verb_form"] = kwargs["verb_form"] - return preprocess_kwargs, forward_kwargs, postprocess_kwargs - - def preprocess(self, inputs, predicate_marker="", predicate_type=None, verb_form=None): - # Here, inputs is string or list of strings; apply string postprocessing - if isinstance(inputs, str): - processed_inputs = self._preprocess_string(inputs, predicate_marker, predicate_type, verb_form) - elif hasattr(inputs, "__iter__"): - processed_inputs = [self._preprocess_string(s, predicate_marker, predicate_type, verb_form) for s in inputs] - else: - raise ValueError("inputs must be str or Iterable[str]") - # Now pass to super.preprocess for tokenization - return super().preprocess(processed_inputs) - - def _preprocess_string(self, seq: str, predicate_marker: str, predicate_type: Optional[str], verb_form: Optional[str]) -> str: - sent_tokens = seq.split(" ") - assert predicate_marker in sent_tokens, f"Input sentence must include a predicate-marker token ('{predicate_marker}') before the target predicate word" - predicate_idx = sent_tokens.index(predicate_marker) - sent_tokens.remove(predicate_marker) - sentence_before_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx)]) - predicate = sent_tokens[predicate_idx] - sentence_after_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx+1, len(sent_tokens))]) - - 
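# A minimal standalone sketch of the predicate-marker handling performed above
# (illustrative only; this helper and the "<predicate>" token are hypothetical and
# not part of the pipeline class): the user inserts a marker token before the target
# predicate, the marker is removed, and the sentence is split into before / predicate / after.
def split_on_marker(sentence, marker="<predicate>"):
    tokens = sentence.split(" ")
    idx = tokens.index(marker)      # marker must appear immediately before the predicate word
    tokens.remove(marker)
    before = " ".join(tokens[:idx])
    predicate = tokens[idx]
    after = " ".join(tokens[idx + 1:])
    return before, predicate, after

# e.g. split_on_marker("The student was interested in Luke 's <predicate> research about sea animals .")
# returns ("The student was interested in Luke 's", "research", "about sea animals .")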
if self.data_args.predicate_marker_type == "generic": - predicate_marker = self.special_tokens.predicate_generic_marker - # In case we want special marker for each predicate type: """ - elif self.data_args.predicate_marker_type == "pred_type": - assert predicate_type is not None, "For this model, you must provide the `predicate_type` either when initializing QASRL_Pipeline(...) or when applying __call__(...) on it" - assert predicate_type in ("verbal", "nominal"), f"`predicate_type` must be either 'verbal' or 'nominal'; got '{predicate_type}'" - predicate_marker = {"verbal": self.special_tokens.predicate_verb_marker , - "nominal": self.special_tokens.predicate_nominalization_marker - }[predicate_type] - - if self.data_args.use_bilateral_predicate_marker: - seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {predicate_marker} {sentence_after_predicate}" - else: - seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {sentence_after_predicate}" - - # embed also verb_form - if self.data_args.append_verb_form and verb_form is None: - raise ValueError(f"For this model, you must provide the `verb_form` of the predicate when applying __call__(...)") - elif self.data_args.append_verb_form: - seq = f"{seq} {self.special_tokens.separator_input_question_predicate} {verb_form} " - else: - seq = f"{seq} " - - # append source prefix (for t5 models) - prefix = self._get_source_prefix(predicate_type) - - return prefix + seq - - def _get_source_prefix(self, predicate_type: Optional[str]): - if not self.is_t5_model or self.data_args.source_prefix is None: - return '' - if not self.data_args.source_prefix.startswith("<"): # Regular prefix - not dependent on input row x - return self.data_args.source_prefix - if self.data_args.source_prefix == "": - if predicate_type is None: - raise ValueError("source_prefix is '' but input no `predicate_type`.") - else: - return f"Generate QAs for {predicate_type} QASRL: " - - def _forward(self, *args, **kwargs): - outputs = super()._forward(*args, **kwargs) - return outputs - - - def postprocess(self, model_outputs): - output_seq = self.tokenizer.decode( - model_outputs["output_ids"].squeeze(), - skip_special_tokens=False, - clean_up_tokenization_spaces=False, - ) - output_seq = output_seq.strip(self.tokenizer.pad_token).strip(self.tokenizer.eos_token).strip() - qa_subseqs = output_seq.split(self.special_tokens.separator_output_pairs) - qas = [self._postrocess_qa(qa_subseq) for qa_subseq in qa_subseqs] - return {"generated_text": output_seq, - "QAs": qas} - - def _postrocess_qa(self, seq: str) -> str: - # split question and answers - if self.special_tokens.separator_output_question_answer in seq: - question, answer = seq.split(self.special_tokens.separator_output_question_answer)[:2] - else: - print("invalid format: no separator between question and answer found...") - return None - # question, answer = seq, '' # Or: backoff to only question - # skip "_" slots in questions - question = ' '.join(t for t in question.split(' ') if t != '_') - answers = [a.strip() for a in answer.split(self.special_tokens.separator_output_answers)] - return {"question": question, "answers": answers} - - -if __name__ == "__main__": - pipe = QASRL_Pipeline("kleinay/qanom-seq2seq-model-baseline") - res1 = pipe("The student was interested in Luke 's research about sea animals .", verb_form="research", predicate_type="nominal") - res2 = pipe(["The doctor was interested in Luke 's treatment .", - "The Veterinary student was interested in Luke 's treatment of sea animals 
."], verb_form="treat", predicate_type="nominal", num_beams=10) - res3 = pipe("A number of professions have developed that specialize in the treatment of mental disorders .", verb_form="develop", predicate_type="verbal") - print(res1) - print(res2) - print(res3) - \ No newline at end of file diff --git a/spaces/leonel1122/Analog-Diffusion/app.py b/spaces/leonel1122/Analog-Diffusion/app.py deleted file mode 100644 index 53b378d1a7f3c5e8ccff85fa3360db0eae54e98f..0000000000000000000000000000000000000000 --- a/spaces/leonel1122/Analog-Diffusion/app.py +++ /dev/null @@ -1,137 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -from PIL import Image - -model_id = 'wavymulder/Analog-Diffusion' -prefix = '' - -scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe_i2i = pipe_i2i.to("cuda") - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False): - - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - prompt = f"{prefix} {prompt}" if auto_prefix else prompt - - try: - if img is not None: - return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - -def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator): - - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe_i2i( - prompt, - negative_prompt = neg_prompt, - init_image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" -
      -
      -

      Analog Diffusion

      -
      -

      - Demo for Analog Diffusion Stable Diffusion model.
      - {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""} -

      - Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space

      - Duplicate Space -
      - """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - image_out = gr.Image(height=512) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=prefix, visible=prefix) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False) - - inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - gr.HTML(""" -
      -
      -

      This space was created using SD Space Creator.

      -
      - """) - -demo.queue(concurrency_count=1) -demo.launch() diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2013 Xforce Keygen X64 Processor [BETTER].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2013 Xforce Keygen X64 Processor [BETTER].md deleted file mode 100644 index 7d4e21d93ca215d90fae2843cb4f057f914d1ece..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2013 Xforce Keygen X64 Processor [BETTER].md +++ /dev/null @@ -1,132 +0,0 @@ -
      -

      How to Activate AutoCAD 2013 with X-Force Keygen for 64-bit Processor

      - -

      If you are looking for a way to activate AutoCAD 2013 on your 64-bit processor, you may have heard of X-Force keygen. This is a powerful tool that can generate activation keys for any Autodesk product, including AutoCAD 2013. In this article, we will show you how to use X-Force keygen to activate AutoCAD 2013 on your 64-bit processor.

      - -

      What is AutoCAD 2013?

      - -

      AutoCAD 2013 is a software that allows you to design and shape the world around you with its flexible and powerful features. It is one of the world's leading 2D and 3D CAD design tools, used by professionals and enthusiasts alike. AutoCAD 2013 can help you create stunning drawings, models, and animations with ease and accuracy.

      -

      autocad 2013 xforce keygen x64 processor


      DOWNLOAD ✦✦✦ https://bytlly.com/2uGwzC



      - -

      What is X-Force Keygen?

      - -

      X-Force keygen is a software that can generate activation keys for any Autodesk product, including AutoCAD 2013. It works by bypassing the registration process and generating a valid serial number and product key for your software. X-Force keygen can save you money and time by allowing you to use AutoCAD 2013 without any limitations or restrictions.

      - -

      How to Use X-Force Keygen to Activate AutoCAD 2013 on 64-bit Processor?

      - -

      To use X-Force keygen to activate AutoCAD 2013 on your 64-bit processor, you need to follow these steps:

      - -
        -
      1. Download X-Force keygen for AutoCAD 2013 from a reliable source. You can find the link for the 64-bit version here: x force keygen 2013 64 bit: xf-autocad-kg x64.exe 2013 free download - Link GG Drive. Make sure you download the correct version for your processor.
      2. -
      3. Disable your antivirus software and Windows Defender temporarily. This is because X-Force keygen may be detected as a virus or malware by some security programs. Don't worry, it is safe to use as long as you download it from a trusted source.
      4. -
      5. Extract the X-Force keygen file using WinRAR or any other extraction tool. You will get a file named xf-autocad-kg x64.exe.
      6. -
      7. Run the X-Force keygen file as administrator. You will see a window like this:
      8. -
      - -X-Force keygen window - -
        -
      1. Select AutoCAD 2013 from the drop-down menu and click on Patch. You will see a message saying Successfully patched!.
      2. -
      3. Open AutoCAD 2013 on your computer and click on Activate. You will see a window like this:
      4. -
      - -AutoCAD activation window - -
        -
      1. Copy the Request Code from the AutoCAD activation window and paste it into the Request field in the X-Force keygen window.
      2. -
      3. Click on Generate. You will see an Activation Code generated in the X-Force keygen window.
      4. -
      5. Copy the Activation Code from the X-Force keygen window and paste it into the I have an activation code from Autodesk field in the AutoCAD activation window.
      6. -
      7. Click on Next. You will see a message saying Your product has been activated!.
      8. -
      9. Congratulations! You have successfully activated AutoCAD 2013 with X-Force keygen on your 64-bit processor. Enjoy using your software without any limitations or restrictions.
      10. -
      - -

      Tips and Warnings

      - -
        -
      • X-Force keygen is only for educational purposes. We do not encourage or condone any illegal or unethical use of this software. Please support the developers by purchasing a genuine license of AutoCAD 2013 if you can afford it.
      • -
      • X-Force keygen may not work for some Autodesk products or versions. If you encounter any problems or errors while using this software, please try another method of activation or contact the support team of Autodesk.
      • -
      • X-Force keygen may be detected as a virus or malware by some antivirus programs or browsers. This is because it modifies the system files and registry entries of your software. Please make sure you download it from a reliable source and disable your security programs temporarily before using it.
      • -
      • X-Force keygen may not be compatible with some operating systems or processors. Please make sure you check the system requirements of your software and download the correct version of X-Force keygen for your processor (32-bit or 64-bit).
      • -
      - -

      Conclusion

      - -

      In this article, we have shown you how to use X-Force keygen to activate AutoCAD 2013 on your 64-bit processor. This is a simple and effective way to use your software without any limitations or restrictions. However, we remind you that this method is only for educational purposes and we do not support any illegal or unethical use of this software. Please respect the intellectual property rights of Autodesk and purchase a genuine license of AutoCAD 2013 if you can afford it.

      - -

      We hope this article has been helpful for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

      -

      Why Use X-Force Keygen to Activate AutoCAD 2013 on 64-bit Processor?

      - -

      There are many benefits of using X-Force keygen to activate AutoCAD 2013 on your 64-bit processor. Some of them are:

      - -
        -
      • You can use AutoCAD 2013 without any trial period or expiration date. You can enjoy all the features and functions of the software without any limitations or restrictions.
      • -
      • You can save money and time by not having to purchase a genuine license of AutoCAD 2013. You can use the software for free and without any hassle.
      • -
      • You can avoid any compatibility issues or errors that may arise from using a cracked or pirated version of AutoCAD 2013. X-Force keygen generates a valid serial number and product key that match your software and processor.
      • -
      • You can update your software and get the latest patches and fixes from Autodesk without any problem. X-Force keygen does not affect the functionality or performance of your software.
      • -
      - -

      What are the System Requirements for AutoCAD 2013 and X-Force Keygen?

      - -

      Before you use X-Force keygen to activate AutoCAD 2013 on your 64-bit processor, you need to make sure that your system meets the minimum requirements for both the software and the keygen. Here are the system requirements for AutoCAD 2013 and X-Force keygen:

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      AutoCAD 2013X-Force Keygen
      Operating System: Windows XP/Vista/7/8/10 (32-bit or 64-bit)Operating System: Windows XP/Vista/7/8/10 (32-bit or 64-bit)
      Processor: Intel Pentium 4 or AMD Athlon dual-core processor, 1.6 GHz or higher with SSE2 technologyProcessor: Intel Pentium 4 or AMD Athlon dual-core processor, 1.6 GHz or higher with SSE2 technology
      Memory: 2 GB RAM (4 GB recommended)Memory: 512 MB RAM (1 GB recommended)
      Hard Disk: 6 GB free disk space for installationHard Disk: 50 MB free disk space for installation
      Display: 1024 x 768 display resolution with true color (1600 x 1050 recommended)Display: Any display resolution with true color
      Graphics Card: Microsoft Direct3D-capable workstation-class graphics cardGraphics Card: Any graphics card that supports DirectX 9.0c or higher
      Internet Connection: Required for installation and activationInternet Connection: Required for downloading and activation
      - -

      If your system meets these requirements, you can proceed to use X-Force keygen to activate AutoCAD 2013 on your 64-bit processor.

      - -

      Frequently Asked Questions about X-Force Keygen and AutoCAD 2013

      - -

      Here are some common questions and answers about X-Force keygen and AutoCAD 2013 that you may find useful:

      -

      - -
        -
      1. Is X-Force keygen safe to use?
        X-Force keygen is safe to use as long as you download it from a reliable source and disable your antivirus software and Windows Defender temporarily before using it. However, we do not guarantee that it will work for every Autodesk product or version, or that it will not cause any damage to your system or software. Use it at your own risk.
      2. -
      3. Is X-Force keygen legal to use?
        X-Force keygen is not legal to use, as it violates the intellectual property rights of Autodesk. It is only for educational purposes and we do not support any illegal or unethical use of this software. Please respect the developers by purchasing a genuine license of AutoCAD 2013 if you can afford it.
      4. -
      5. How long does X-Force keygen last?
        X-Force keygen lasts indefinitely, as it generates a permanent activation code for your software. You do not need to renew or reactivate your software after using X-Force keygen.
      6. -
      7. Can I update my software after using X-Force keygen?
        Yes, you can update your software after using X-Force keygen, as it does not affect the functionality or performance of your software. However, some updates may require a new activation code, which you can generate again using X-Force keygen.
      8. -
      9. Can I use X-Force keygen on multiple computers?
        No, you cannot use X-Force keygen on multiple computers, as it generates a unique activation code for each computer based on its processor and system information. If you try to use the same activation code on another computer, it will not work.
      10. -
      11. Can I use X-Force keygen on Mac?
        No, you cannot use X-Force keygen on Mac, as it is only compatible with Windows operating systems. However, there are other versions of X-Force keygen that are made for Mac operating systems, which you can find online.
      12. -
      - -

      Final Words

      - -

      In this article, we have shown you how to use X-Force keygen to activate AutoCAD 2013 on your 64-bit processor. This is a simple and effective way to use your software without any limitations or restrictions. However, we remind you that this method is only for educational purposes and we do not support any illegal or unethical use of this software. Please respect the intellectual property rights of Autodesk and purchase a genuine license of AutoCAD 2013 if you can afford it.

      - -

      We hope this article has been helpful for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

      -

      In this article, we have shown you how to use X-Force keygen to activate AutoCAD 2013 on your 64-bit processor. This is a simple and effective way to use your software without any limitations or restrictions. However, we remind you that this method is only for educational purposes and we do not support any illegal or unethical use of this software. Please respect the intellectual property rights of Autodesk and purchase a genuine license of AutoCAD 2013 if you can afford it.

      - -

      We hope this article has been helpful for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Berlitz English Level 3 (Book And Audio).md b/spaces/lincquiQcaudo/Top-20-Diffusion/Berlitz English Level 3 (Book And Audio).md deleted file mode 100644 index ef7504d136d22f09c96971716cee7967d2bb0d5e..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Berlitz English Level 3 (Book And Audio).md +++ /dev/null @@ -1,6 +0,0 @@ -

      Berlitz English Level 3 (Book And Audio)


      Download Ziphttps://bytlly.com/2uGxCJ



      - -Learning English, Spanish, German, French, Mandarin, Arabic and other languages. All levels of language teaching. Tutor is a certified linguist and foreign language teacher. Tutor in English in Odessa. French tutor in Odessa. Spanish tutor in Odessa. German tutor in Odessa. Italian language tutor in Odessa. Tutor in Arabic in Odessa. English tutor in Odessa. French tutor in Odessa. Tutor in German in Odessa. Spanish tutor in Odessa. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/CRACK Boilsoft Video Joiner 7.02.2.md b/spaces/lincquiQcaudo/Top-20-Diffusion/CRACK Boilsoft Video Joiner 7.02.2.md deleted file mode 100644 index a3956c013329547e06fc89a067eab98bf7feaf28..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/CRACK Boilsoft Video Joiner 7.02.2.md +++ /dev/null @@ -1,14 +0,0 @@ -

      CRACK Boilsoft Video Joiner 7.02.2


      Download Zip 🌟 https://bytlly.com/2uGvNH



      - -avi/.mpg/.wmv/.mov/.mpeg video files. The output file is a single video file. It supports all popular video formats. This is the most powerful video joiner software which can join and merge multiple videos with complete professional video joining tools and video editing functions. - -AVI/MPEG/WMV/RM/MOV Joiner - Now AVI/MPEG/WMV/RM/MOV Joiner is a powerful video joiner that allows you to merge different AVI, MPEG, WMV, RM, MOV, MP4, M4V videos into one single video file that can be played on different video players and devices. The software supports all popular videos such as AVI, MPEG, WMV, RM, MOV, MP4, M4V videos. The output file is supported the most popular formats such as AVI, MPEG, WMV, RM, MOV, MP4, M4V videos. - -AVI/MPEG/MPG/WMV/RM/MOV/MTS/MP4 Joiner - Now AVI/MPEG/MPG/WMV/RM/MOV/MTS/MP4 Joiner is a powerful video joiner that allows you to merge different AVI, MPEG, MPG, WMV, RM, MTS, MP4 videos into one single video file that can be played on different video players and devices. The software supports all popular videos such as AVI, MPEG, MPG, WMV, RM, MTS, MP4 videos. The output file is supported the most popular formats such as AVI, MPEG, MPG, WMV, RM, MTS, MP4 videos. - -WinX DVD Ripper - WinX DVD Ripper is powerful and professional DVD to AVI Ripper software. It can rip your DVD to AVI, MKV, MOV, WMV, MP4, MP3, M4A, etc. files with high speed and excellent quality. It supports all popular DVDs and DVD-5 discs. Its AVI Ripper module allows you to extract the main parts from your DVD movie, then encode them to AVI, MPG, MPEG, MP4, M4V, MOV, WMV, etc. format. Moreover, you can choose audio track from the original DVD and then merge it with output video as a multi-track AVI video. - -AVI/MPEG/MP4/M4V 4fefd39f24
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Foundations Fluid Mechanics Sw Yuan Pdf Downloads Torrent UPD.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Foundations Fluid Mechanics Sw Yuan Pdf Downloads Torrent UPD.md deleted file mode 100644 index 36b40d8b51b55d2fc51963afc57f75a5849ff81c..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Foundations Fluid Mechanics Sw Yuan Pdf Downloads Torrent UPD.md +++ /dev/null @@ -1,12 +0,0 @@ -

      foundations fluid mechanics sw yuan pdf downloads torrent


      Download Zip >>>>> https://bytlly.com/2uGy4b



- -Catalog: Foundations of Fluid Mechanics by S. W. Yuan, free PDF download. -On this page you can buy the book "Foundations of Fluid Mechanics".
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/MagicISO Maker 5.5 Serial !FREE!.md b/spaces/lincquiQcaudo/Top-20-Diffusion/MagicISO Maker 5.5 Serial !FREE!.md deleted file mode 100644 index e1154b85d2a62a98596c13658de9878c71ef3e6b..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/MagicISO Maker 5.5 Serial !FREE!.md +++ /dev/null @@ -1,35 +0,0 @@ -
      -

      MagicISO Maker 5.5 Serial: How to Create and Edit CD/DVD Image Files

      - -

      If you are looking for a powerful and easy-to-use tool to create and edit CD/DVD image files, you may want to try MagicISO Maker 5.5. This software allows you to open, create, edit and extract CD/DVD image files, such as ISO, BIN, NRG, IMG, DAA and more. You can also convert between different image formats, make bootable disks, mount virtual drives and burn image files to CDs or DVDs.

      -

      MagicISO Maker 5.5 Serial


      DOWNLOAD ⇒⇒⇒ https://bytlly.com/2uGvZL



      - -

      However, to enjoy the full features of MagicISO Maker 5.5, you need a valid serial number or unlock key. This is a code that you can enter when you install or run the software to activate it. Without a serial number, you can only use MagicISO Maker 5.5 as a trial version, which has some limitations and restrictions.

      - -

      Where to Get MagicISO Maker 5.5 Serial Number

      - -

There are several ways to get a MagicISO Maker 5.5 serial number. One way is to purchase it from the official MagicISO website. This is the only legal and safe way to get the software, and it supports the developers. However, it costs money, and you may not want to spend it on software that you will rarely use.

      - -

Another way is to download it from the internet for free. Many websites offer MagicISO Maker 5.5 serial numbers or crack files that bypass the activation process. However, this is not recommended, as it may be illegal, unsafe and unethical. You risk downloading viruses, malware or spyware that can harm your computer or compromise your personal information, and you may also violate copyright law and face legal consequences.

      -

      - -

A third way is to generate your own MagicISO Maker 5.5 serial number using a keygen program. A keygen is a program that generates serial numbers for various applications. However, this is also not advisable, as it may be illegal, unreliable and risky. You may generate an invalid or duplicate serial number that does not work or causes problems with the software, and the keygen itself may expose your computer to viruses, malware or spyware.

      - -

      How to Use MagicISO Maker 5.5 Serial Number

      - -

If you have obtained a valid MagicISO Maker 5.5 serial number from a trusted source, you can use it to activate the software and enjoy its full features. Here are the steps to use a MagicISO Maker 5.5 serial number:

      - -
        -
1. Download MagicISO Maker 5.5 from the official website or a reputable source.
2. Install MagicISO Maker 5.5 on your computer by following the instructions.
3. Run MagicISO Maker 5.5 and enter your name and serial number when prompted.
4. Click OK and enjoy the software.
      - -

If you have any problems using the MagicISO Maker 5.5 serial number, you can contact MagicISO customer support or visit their online forum for help.

      - -

      Conclusion

      - -

MagicISO Maker 5.5 is a great tool for creating and editing CD/DVD image files, but you need a serial number to activate it and use its full features. You can get a serial number from various sources, though you should be careful about the legality, safety and ethics of doing so. The best options are to purchase it from the official MagicISO website or to use a free alternative that does not require a serial number.

      -
      -
      \ No newline at end of file diff --git a/spaces/lizhongping2713/StableDiffusion-WebUI/Dockerfile b/spaces/lizhongping2713/StableDiffusion-WebUI/Dockerfile deleted file mode 100644 index 5509b6d80fc3fb9fc0b33f6e57e3308921f09010..0000000000000000000000000000000000000000 --- a/spaces/lizhongping2713/StableDiffusion-WebUI/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM livebook/livebook:latest-cuda11.8 - -ENV LIVEBOOK_APP_SERVICE_NAME "🐳 Hugging Face - $SPACE_TITLE" -ENV LIVEBOOK_APP_SERVICE_URL "https://huggingface.co/spaces/$SPACE_AUTHOR_NAME/$SPACE_REPO_NAME" -ENV LIVEBOOK_UPDATE_INSTRUCTIONS_URL "https://livebook.dev" -ENV LIVEBOOK_WITHIN_IFRAME "true" -ENV LIVEBOOK_DATA_PATH "/data" -ENV LIVEBOOK_PORT 7860 - -EXPOSE 7860 - -USER root -RUN mkdir -p /data -RUN chmod 777 /data diff --git a/spaces/lsli/lab/README.md b/spaces/lsli/lab/README.md deleted file mode 100644 index 60f16b29cd859045a90c0b53fcc43d61e9f8010a..0000000000000000000000000000000000000000 --- a/spaces/lsli/lab/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Lab -emoji: 👁 -colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lusea/Voice-Cloning-for-Bilibili/Makefile b/spaces/lusea/Voice-Cloning-for-Bilibili/Makefile deleted file mode 100644 index ad23323414bd2175956f6aef92f223a02f7258be..0000000000000000000000000000000000000000 --- a/spaces/lusea/Voice-Cloning-for-Bilibili/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -.PHONY: quality style - -# Check that source code meets quality standards -quality: - black --check --diff . - ruff . - -# Format source code automatically -style: - black . - ruff . --fix diff --git a/spaces/ma-xu/LIVE/thrust/dependencies/cub/CONTRIBUTING.md b/spaces/ma-xu/LIVE/thrust/dependencies/cub/CONTRIBUTING.md deleted file mode 100644 index dd641756387902cb775c44e669564997dd83f325..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/dependencies/cub/CONTRIBUTING.md +++ /dev/null @@ -1,366 +0,0 @@ -# Table of Contents - -1. [Contributing to CUB](#contributing-to-cub) -1. [CMake Options](#cmake-options) -1. [Development Model](#development-model) - -# Contributing to CUB - -CUB uses Github to manage all open-source development, including bug tracking, -pull requests, and design discussions. This document details how to get -started as a CUB contributor. - -An overview of this process is: - -1. [Clone the CUB repository](#clone-the-cub-repository) -1. [Setup a fork of CUB](#setup-a-fork-of-cub) -1. [Setup your environment](#setup-your-environment) -1. [Create a development branch](#create-a-development-branch) -1. [Local development loop](#local-development-loop) -1. [Push development branch to your fork](#push-development-branch-to-your-fork) -1. [Create pull request](#create-pull-request) -1. [Address feedback and update pull request](#address-feedback-and-update-pull-request) -1. [When your PR is approved...](#when-your-pr-is-approved) - -## Clone the CUB Repository - -To get started, clone the main repository to your local computer: - -``` -git clone https://github.com/thrust/cub.git -cd cub -``` - -## Setup a Fork of CUB - -You'll need a fork of CUB on Github to create a pull request. To setup your -fork: - -1. Create a Github account (if needed) -2. Go to [the CUB Github page](https://github.com/thrust/cub) -3. Click "Fork" and follow any prompts that appear. 
- -Once your fork is created, setup a new remote repo in your local CUB clone: - -``` -git remote add github-fork git@github.com:/cub.git -``` - -## Setup Your Environment - -### Git Environment - -If you haven't already, this is a good time to tell git who you are. This -information is used to fill out authorship information on your git commits. - -``` -git config --global user.name "John Doe" -git config --global user.email johndoe@example.com -``` - -### Configure CMake builds - -CUB uses [CMake](https://www.cmake.org) for its developer build system. To -configure, build, and test your checkout of CUB with default settings: - -``` -# Create build directory: -mkdir build -cd build - -# Configure -- use one of the following: -cmake .. # Command line interface. -ccmake .. # ncurses GUI (Linux only) -cmake-gui # Graphical UI, set source/build directories in the app - -# Build: -cmake --build . -j # invokes make (or ninja, etc) - -# Run tests and examples: -ctest -``` - -See [CMake Options](#cmake-options) for details on customizing the build. - -## Create a Development Branch - -All work should be done in a development branch (also called a "topic branch") -and not directly in the `master` branch. This makes it easier to manage multiple -in-progress patches at once, and provides a descriptive label for your patch -as it passes through the review system. - -To create a new branch based on the current `master`: - -``` -# Checkout local master branch: -cd /path/to/cub/sources -git checkout master - -# Sync local master branch with github: -git pull - -# Create a new branch named `my_descriptive_branch_name` based on master: -git checkout -b my_descriptive_branch_name - -# Verify that the branch has been created and is currently checked out: -git branch -``` - -CUB branch names should follow a particular pattern: - -- For new features, name the branch `feature/` -- For bugfixes associated with a github issue, use `bug/github/-` - - Internal nvidia and gitlab bugs should use `nvidia` or `gitlab` in place of - `github`. - -## Local Development Loop - -### Edit, Build, Test, Repeat - -Once the topic branch is created, you're all set to start working on CUB -code. Make some changes, then build and test them: - -``` -# Implement changes: -cd /path/to/cub/sources -emacs cub/some_file.cuh # or whatever editor you prefer - -# Create / update a unit test for your changes: -emacs tests/some_test.cu - -# Check that everything builds and tests pass: -cd /path/to/cub/build/directory -cmake --build . -j # or make, ninja, etc -ctest -``` - -### Creating a Commit - -Once you're satisfied with your patch, commit your changes: - -``` -# Manually add changed files and create a commit: -cd /path/to/cub -git add cub/some_file.cuh -git add tests/some_test.cu -git commit - -# Or, if possible, use git-gui to review your changes while building your patch: -git gui -``` - -#### Writing a Commit Message - -Your commit message will communicate the purpose and rationale behind your -patch to other developers, and will be used to populate the initial description -of your Github pull request. - -When writing a commit message, the following standard format should be used, -since tools in the git ecosystem are designed to parse this correctly: - -``` -First line of commit message is a short summary (<80 char) - -Detailed description of change begins on third line. This portion can -span multiple lines, try to manually wrap them at something reasonable. 
- -Blank lines can be used to separate multiple paragraphs in the description. - -If your patch is associated with another pull request or issue in the main -CUB repository, you should reference it with a `#` symbol, e.g. -#1023 for issue 1023. - -For issues / pull requests in a different github repo, reference them using -the full syntax, e.g. thrust/thrust#4 for issue 4 in the thrust/thrust repo. - -Markdown is recommended for formatting more detailed messages, as these will -be nicely rendered on Github, etc. -``` - -## Push Development Branch to your Fork - -Once you've committed your changes to a local development branch, it's time to -push them to your fork: - -``` -cd /path/to/cub/checkout -git checkout my_descriptive_branch_name # if not already checked out -git push --set-upstream github-fork my_descriptive_branch_name -``` - -`--set-upstream github-fork` tells git that future pushes/pulls on this branch -should target your `github-fork` remote by default. - -## Create Pull Request - -To create a pull request for your freshly pushed branch, open your github fork -in a browser by going to `https://www.github.com//cub`. A -prompt may automatically appear asking you to create a pull request if you've -recently pushed a branch. - -If there's no prompt, go to "Code" > "Branches" and click the appropriate -"New pull request" button for your branch. - -If you would like a specific developer to review your patch, feel free to -request them as a reviewer at this time. - -The CUB team will review your patch, test it on NVIDIA's internal CI, and -provide feedback. - -## Address Feedback and Update Pull Request - -If the reviewers request changes to your patch, use the following process to -update the pull request: - -``` -# Make changes: -cd /path/to/cub/sources -git checkout my_descriptive_branch_name -emacs cub/some_file.cuh -emacs tests/some_test.cu - -# Build + test -cd /path/to/thrust/build/directory -cmake --build . -j -ctest - -# Amend commit: -cd /path/to/cub/sources -git add cub/some_file.cuh -git add tests/some_test.cu -git commit --amend -# Or -git gui # Check the "Amend Last Commit" box - -# Update the branch on your fork: -git push -f -``` - -At this point, the pull request should show your recent changes. - -## When Your PR is Approved - -Once your pull request is approved by the CUB team, no further action is -needed from you. We will handle integrating it since we must coordinate changes -to `master` with NVIDIA's internal perforce repository. - -# CMake Options - -A CUB build is configured using CMake options. These may be passed to CMake -using - -``` -cmake -D= /path/to/cub/sources -``` - -or configured interactively with the `ccmake` or `cmake-gui` interfaces. - -The configuration options for CUB are: - -- `CMAKE_BUILD_TYPE={Release, Debug, RelWithDebInfo, MinSizeRel}` - - Standard CMake build option. Default: `RelWithDebInfo` -- `CUB_ENABLE_HEADER_TESTING={ON, OFF}` - - Whether to test compile public headers. Default is `ON`. -- `CUB_ENABLE_TESTING={ON, OFF}` - - Whether to build unit tests. Default is `ON`. -- `CUB_ENABLE_EXAMPLES={ON, OFF}` - - Whether to build examples. Default is `ON`. -- `CUB_ENABLE_DIALECT_CPPXX={ON, OFF}` - - Toggle whether a specific C++ dialect will be targeted. - - Multiple dialects may be targeted in a single build. - - Possible values of `XX` are `{11, 14, 17}`. - - By default, only C++14 is enabled. 
-- `CUB_ENABLE_COMPUTE_XX={ON, OFF}` - - Controls the targeted CUDA architecture(s) - - Multiple options may be selected when using NVCC as the CUDA compiler. - - Valid values of `XX` are: - `{35, 37, 50, 52, 53, 60, 61, 62, 70, 72, 75, 80}` - - Default value depends on `CUB_DISABLE_ARCH_BY_DEFAULT`: -- `CUB_ENABLE_COMPUTE_FUTURE={ON, OFF}` - - If enabled, CUDA objects will target the most recent virtual architecture - in addition to the real architectures specified by the - `CUB_ENABLE_COMPUTE_XX` options. - - Default value depends on `CUB_DISABLE_ARCH_BY_DEFAULT`: -- `CUB_DISABLE_ARCH_BY_DEFAULT={ON, OFF}` - - When `ON`, all `CUB_ENABLE_COMPUTE_*` options are initially `OFF`. - - Default: `OFF` (meaning all architectures are enabled by default) -- `CUB_ENABLE_TESTS_WITH_RDC={ON, OFF}` - - Whether to enable Relocatable Device Code when building tests. - Default is `OFF`. -- `CUB_ENABLE_EXAMPLES_WITH_RDC={ON, OFF}` - - Whether to enable Relocatable Device Code when building examples. - Default is `OFF`. - -# Development Model - -The following is a description of the basic development process that CUB follows. This is a living -document that will evolve as our process evolves. - -CUB is distributed in three ways: - - * On GitHub. - * In the NVIDIA HPC SDK. - * In the CUDA Toolkit. - -## Trunk Based Development - -CUB uses [trunk based development](https://trunkbaseddevelopment.com). There is a single long-lived -branch called `master`. Engineers may create branches for feature development. Such branches always -merge into `master`. There are no release branches. Releases are produced by taking a snapshot of -`master` ("snapping"). After a release has been snapped from `master`, it will never be changed. - -## Repositories - -As CUB is developed both on GitHub and internally at NVIDIA, there are three main places where code lives: - - * The Source of Truth, the [public CUB repository](https://github.com/thrust/cub), referred to as - `github` later in this document. - * An internal GitLab repository, referred to as `gitlab` later in this document. - * An internal Perforce repository, referred to as `perforce` later in this document. - -## Versioning - -CUB has its own versioning system for releases, independent of the versioning scheme of the NVIDIA -HPC SDK or the CUDA Toolkit. - -Today, CUB version numbers have a specific [semantic meaning](https://semver.org/). -Releases prior to 1.10.0 largely, but not strictly, followed these semantic meanings. - -The version number for a CUB release uses the following format: `MMM.mmm.ss-ppp`, where: - - * `CUB_VERSION_MAJOR`/`MMM`: Major version, up to 3 decimal digits. It is incremented - when the fundamental nature of the library evolves, leading to widespread changes across the - entire library interface with no guarantee of API, ABI, or semantic compatibility with former - versions. - * `CUB_VERSION_MINOR`/`mmm`: Minor version, up to 3 decimal digits. It is incremented when - breaking API, ABI, or semantic changes are made. - * `CUB_VERSION_SUBMINOR`/`ss`: Subminor version, up to 2 decimal digits. It is incremented - when notable new features or bug fixes or features that are API, ABI, and semantic backwards - compatible are added. - * `CUB_PATCH_NUMBER`/`ppp`: Patch number, up to 3 decimal digits. It is incremented if any - change in the repo whatsoever is made and no other version component has been incremented. - -The `` header defines `CUB_*` macros for all of the version components mentioned -above. 
Additionally, a `CUB_VERSION` macro is defined, which is an integer literal containing all -of the version components except for `CUB_PATCH_NUMBER`. - -## Branches and Tags - -The following tag names are used in the CUB project: - - * `github/nvhpc-X.Y`: the tag that directly corresponds to what has been shipped in the NVIDIA HPC SDK release X.Y. - * `github/cuda-X.Y`: the tag that directly corresponds to what has been shipped in the CUDA Toolkit release X.Y. - * `github/A.B.C`: the tag that directly corresponds to a CUB version A.B.C. - -The following branch names are used in the CUB project: - - * `github/master`: the Source of Truth development branch of CUB. - * `github/old-master`: the old Source of Truth branch, before unification of public and internal repositories. - * `github/feature/`: feature branch for a feature under development. - * `github/bug//-`: bug fix branch, where `bug-system` is `github` or `nvidia`. - * `gitlab/master`: mirror of `github/master`. - * `perforce/private`: mirrored `github/master`, plus files necessary for internal NVIDIA testing systems. - -On the rare occasion that we cannot do work in the open, for example when developing a change specific to an -unreleased product, these branches may exist on `gitlab` instead of `github`. By default, everything should be -in the open on `github` unless there is a strong motivation for it to not be open. diff --git a/spaces/magicr/BuboGPT/bubogpt/common/registry.py b/spaces/magicr/BuboGPT/bubogpt/common/registry.py deleted file mode 100644 index 564d59ca58bb12b3f863513de90ec6d90fabba34..0000000000000000000000000000000000000000 --- a/spaces/magicr/BuboGPT/bubogpt/common/registry.py +++ /dev/null @@ -1,333 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - - - -class Registry: - mapping = { - "builder_name_mapping": {}, - "task_name_mapping": {}, - "processor_name_mapping": {}, - "model_name_mapping": {}, - "lr_scheduler_name_mapping": {}, - "runner_name_mapping": {}, - "state": {}, - "paths": {}, - } - - @classmethod - def register_builder(cls, name): - r"""Register a dataset builder to registry with key 'name' - - Args: - name: Key with which the builder will be registered. - - Usage: - - from bubogpt.common.registry import registry - from bubogpt.datasets.base_dataset_builder import BaseDatasetBuilder - """ - - def wrap(builder_cls): - # TODO: merge them or split builders by modality - from bubogpt.datasets.builders.image_base_dataset_builder import ImageBaseDatasetBuilder - from bubogpt.datasets.builders.audio_base_dataset_builder import AudioBaseDatasetBuilder - from bubogpt.datasets.builders.multimodal_base_dataset_builder import MultimodalBaseDatasetBuilder - - assert issubclass( - builder_cls, (ImageBaseDatasetBuilder, AudioBaseDatasetBuilder, MultimodalBaseDatasetBuilder) - ), "All builders must inherit BaseDatasetBuilder class, found {}".format( - builder_cls - ) - if name in cls.mapping["builder_name_mapping"]: - raise KeyError( - "Name '{}' already registered for {}.".format( - name, cls.mapping["builder_name_mapping"][name] - ) - ) - cls.mapping["builder_name_mapping"][name] = builder_cls - return builder_cls - - return wrap - - @classmethod - def register_task(cls, name): - r"""Register a task to registry with key 'name' - - Args: - name: Key with which the task will be registered. 
- - Usage: - - from bubogpt.common.registry import registry - """ - - def wrap(task_cls): - from bubogpt.tasks.base_task import BaseTask - - assert issubclass( - task_cls, BaseTask - ), "All tasks must inherit BaseTask class" - if name in cls.mapping["task_name_mapping"]: - raise KeyError( - "Name '{}' already registered for {}.".format( - name, cls.mapping["task_name_mapping"][name] - ) - ) - cls.mapping["task_name_mapping"][name] = task_cls - return task_cls - - return wrap - - @classmethod - def register_model(cls, name): - r"""Register a task to registry with key 'name' - - Args: - name: Key with which the task will be registered. - - Usage: - - from bubogpt.common.registry import registry - """ - - def wrap(model_cls): - from bubogpt.models import BaseModel - - assert issubclass( - model_cls, BaseModel - ), "All models must inherit BaseModel class" - if name in cls.mapping["model_name_mapping"]: - raise KeyError( - "Name '{}' already registered for {}.".format( - name, cls.mapping["model_name_mapping"][name] - ) - ) - cls.mapping["model_name_mapping"][name] = model_cls - return model_cls - - return wrap - - @classmethod - def register_processor(cls, name): - r"""Register a processor to registry with key 'name' - - Args: - name: Key with which the task will be registered. - - Usage: - - from bubogpt.common.registry import registry - """ - - def wrap(processor_cls): - from bubogpt.processors import BaseProcessor - - assert issubclass( - processor_cls, BaseProcessor - ), "All processors must inherit BaseProcessor class" - if name in cls.mapping["processor_name_mapping"]: - raise KeyError( - "Name '{}' already registered for {}.".format( - name, cls.mapping["processor_name_mapping"][name] - ) - ) - cls.mapping["processor_name_mapping"][name] = processor_cls - return processor_cls - - return wrap - - @classmethod - def register_lr_scheduler(cls, name): - r"""Register a model to registry with key 'name' - - Args: - name: Key with which the task will be registered. - - Usage: - - from bubogpt.common.registry import registry - """ - - def wrap(lr_sched_cls): - if name in cls.mapping["lr_scheduler_name_mapping"]: - raise KeyError( - "Name '{}' already registered for {}.".format( - name, cls.mapping["lr_scheduler_name_mapping"][name] - ) - ) - cls.mapping["lr_scheduler_name_mapping"][name] = lr_sched_cls - return lr_sched_cls - - return wrap - - @classmethod - def register_runner(cls, name): - r"""Register a model to registry with key 'name' - - Args: - name: Key with which the task will be registered. - - Usage: - - from bubogpt.common.registry import registry - """ - - def wrap(runner_cls): - if name in cls.mapping["runner_name_mapping"]: - raise KeyError( - "Name '{}' already registered for {}.".format( - name, cls.mapping["runner_name_mapping"][name] - ) - ) - cls.mapping["runner_name_mapping"][name] = runner_cls - return runner_cls - - return wrap - - @classmethod - def register_path(cls, name, path): - r"""Register a path to registry with key 'name' - - Args: - name: Key with which the path will be registered. - - Usage: - - from bubogpt.common.registry import registry - """ - assert isinstance(path, str), "All path must be str." - if name in cls.mapping["paths"]: - raise KeyError("Name '{}' already registered.".format(name)) - cls.mapping["paths"][name] = path - - @classmethod - def register(cls, name, obj): - r"""Register an item to registry with key 'name' - - Args: - name: Key with which the item will be registered. 
- - Usage:: - - from bubogpt.common.registry import registry - - registry.register("config", {}) - """ - path = name.split(".") - current = cls.mapping["state"] - - for part in path[:-1]: - if part not in current: - current[part] = {} - current = current[part] - - current[path[-1]] = obj - - # @classmethod - # def get_trainer_class(cls, name): - # return cls.mapping["trainer_name_mapping"].get(name, None) - - @classmethod - def get_builder_class(cls, name): - return cls.mapping["builder_name_mapping"].get(name, None) - - @classmethod - def get_model_class(cls, name): - return cls.mapping["model_name_mapping"].get(name, None) - - @classmethod - def get_task_class(cls, name): - return cls.mapping["task_name_mapping"].get(name, None) - - @classmethod - def get_processor_class(cls, name): - return cls.mapping["processor_name_mapping"].get(name, None) - - @classmethod - def get_lr_scheduler_class(cls, name): - return cls.mapping["lr_scheduler_name_mapping"].get(name, None) - - @classmethod - def get_runner_class(cls, name): - return cls.mapping["runner_name_mapping"].get(name, None) - - @classmethod - def list_runners(cls): - return sorted(cls.mapping["runner_name_mapping"].keys()) - - @classmethod - def list_models(cls): - return sorted(cls.mapping["model_name_mapping"].keys()) - - @classmethod - def list_tasks(cls): - return sorted(cls.mapping["task_name_mapping"].keys()) - - @classmethod - def list_processors(cls): - return sorted(cls.mapping["processor_name_mapping"].keys()) - - @classmethod - def list_lr_schedulers(cls): - return sorted(cls.mapping["lr_scheduler_name_mapping"].keys()) - - @classmethod - def list_datasets(cls): - return sorted(cls.mapping["builder_name_mapping"].keys()) - - @classmethod - def get_path(cls, name): - return cls.mapping["paths"].get(name, None) - - @classmethod - def get(cls, name, default=None, no_warning=False): - r"""Get an item from registry with key 'name' - - Args: - name (string): Key whose value needs to be retrieved. - default: If passed and key is not in registry, default value will - be returned with a warning. Default: None - no_warning (bool): If passed as True, warning when key doesn't exist - will not be generated. Useful for MMF's - internal operations. Default: False - """ - original_name = name - name = name.split(".") - value = cls.mapping["state"] - for subname in name: - value = value.get(subname, default) - if value is default: - break - - if ( - "writer" in cls.mapping["state"] - and value == default - and no_warning is False - ): - cls.mapping["state"]["writer"].warning( - "Key {} is not present in registry, returning default value " - "of {}".format(original_name, default) - ) - return value - - @classmethod - def unregister(cls, name): - r"""Remove an item from registry with key 'name' - - Args: - name: Key which needs to be removed. 
- Usage:: - - from mmf.common.registry import registry - - config = registry.unregister("config") - """ - return cls.mapping["state"].pop(name, None) - - -registry = Registry() diff --git a/spaces/mahmuod/CLIP-Interrogator/app.py b/spaces/mahmuod/CLIP-Interrogator/app.py deleted file mode 100644 index 6089758d3b0499cbfd1e44d42a5f7f5f7cc8d01f..0000000000000000000000000000000000000000 --- a/spaces/mahmuod/CLIP-Interrogator/app.py +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/env python3 -import gradio as gr -import os -from clip_interrogator import Config, Interrogator -from huggingface_hub import hf_hub_download -from share_btn import community_icon_html, loading_icon_html, share_js - -MODELS = ['ViT-L (best for Stable Diffusion 1.*)', 'ViT-H (best for Stable Diffusion 2.*)'] - -# download preprocessed files -PREPROCESS_FILES = [ - 'ViT-H-14_laion2b_s32b_b79k_artists.pkl', - 'ViT-H-14_laion2b_s32b_b79k_flavors.pkl', - 'ViT-H-14_laion2b_s32b_b79k_mediums.pkl', - 'ViT-H-14_laion2b_s32b_b79k_movements.pkl', - 'ViT-H-14_laion2b_s32b_b79k_trendings.pkl', - 'ViT-L-14_openai_artists.pkl', - 'ViT-L-14_openai_flavors.pkl', - 'ViT-L-14_openai_mediums.pkl', - 'ViT-L-14_openai_movements.pkl', - 'ViT-L-14_openai_trendings.pkl', -] -print("Download preprocessed cache files...") -for file in PREPROCESS_FILES: - path = hf_hub_download(repo_id="pharma/ci-preprocess", filename=file, cache_dir="cache") - cache_path = os.path.dirname(path) - - -# load BLIP and ViT-L https://huggingface.co/openai/clip-vit-large-patch14 -config = Config(cache_path=cache_path, clip_model_path="cache", clip_model_name="ViT-L-14/openai") -ci_vitl = Interrogator(config) -ci_vitl.clip_model = ci_vitl.clip_model.to("cpu") - -# load ViT-H https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K -config.blip_model = ci_vitl.blip_model -config.clip_model_name = "ViT-H-14/laion2b_s32b_b79k" -ci_vith = Interrogator(config) -ci_vith.clip_model = ci_vith.clip_model.to("cpu") - - -def image_analysis(image, clip_model_name): - # move selected model to GPU and other model to CPU - if clip_model_name == MODELS[0]: - ci_vith.clip_model = ci_vith.clip_model.to("cpu") - ci_vitl.clip_model = ci_vitl.clip_model.to(ci_vitl.device) - ci = ci_vitl - else: - ci_vitl.clip_model = ci_vitl.clip_model.to("cpu") - ci_vith.clip_model = ci_vith.clip_model.to(ci_vith.device) - ci = ci_vith - - image = image.convert('RGB') - image_features = ci.image_to_features(image) - - top_mediums = ci.mediums.rank(image_features, 5) - top_artists = ci.artists.rank(image_features, 5) - top_movements = ci.movements.rank(image_features, 5) - top_trendings = ci.trendings.rank(image_features, 5) - top_flavors = ci.flavors.rank(image_features, 5) - - medium_ranks = {medium: sim for medium, sim in zip(top_mediums, ci.similarities(image_features, top_mediums))} - artist_ranks = {artist: sim for artist, sim in zip(top_artists, ci.similarities(image_features, top_artists))} - movement_ranks = {movement: sim for movement, sim in zip(top_movements, ci.similarities(image_features, top_movements))} - trending_ranks = {trending: sim for trending, sim in zip(top_trendings, ci.similarities(image_features, top_trendings))} - flavor_ranks = {flavor: sim for flavor, sim in zip(top_flavors, ci.similarities(image_features, top_flavors))} - - return medium_ranks, artist_ranks, movement_ranks, trending_ranks, flavor_ranks - - -def image_to_prompt(image, clip_model_name, mode): - # move selected model to GPU and other model to CPU - if clip_model_name == MODELS[0]: - ci_vith.clip_model = 
ci_vith.clip_model.to("cpu") - ci_vitl.clip_model = ci_vitl.clip_model.to(ci_vitl.device) - ci = ci_vitl - else: - ci_vitl.clip_model = ci_vitl.clip_model.to("cpu") - ci_vith.clip_model = ci_vith.clip_model.to(ci_vith.device) - ci = ci_vith - - ci.config.blip_num_beams = 64 - ci.config.chunk_size = 2048 - ci.config.flavor_intermediate_count = 2048 if clip_model_name == MODELS[0] else 1024 - - image = image.convert('RGB') - if mode == 'best': - prompt = ci.interrogate(image) - elif mode == 'classic': - prompt = ci.interrogate_classic(image) - elif mode == 'fast': - prompt = ci.interrogate_fast(image) - elif mode == 'negative': - prompt = ci.interrogate_negative(image) - - return prompt, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - - -TITLE = """ -
      -
      -

      - CLIP Interrogator -

      -
      -

      - Want to figure out what a good prompt might be to create new images like an existing one?
      The CLIP Interrogator is here to get you answers! -

      -

      You can skip the queue by duplicating this space and upgrading to gpu in settings: Duplicate Space

      -
      -""" - -ARTICLE = """ -
      -

      - Example art by Layers - and Lin Tong - from pixabay.com -

      - -

      - Server busy? You can also run on Google Colab -

      - -

      - Has this been helpful to you? Follow me on twitter - @pharmapsychotic
      - and check out more tools at my - Ai generative art tools list -

      -
      -""" - -CSS = """ - #col-container {margin-left: auto; margin-right: auto;} - a {text-decoration-line: underline; font-weight: 600;} - .animate-spin { - animation: spin 1s linear infinite; - } - @keyframes spin { - from { transform: rotate(0deg); } - to { transform: rotate(360deg); } - } - #share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; - } - #share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; - } - #share-btn * { - all: unset; - } - #share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; - } - #share-btn-container .wrap { - display: none !important; - } -""" - -def analyze_tab(): - with gr.Column(): - with gr.Row(): - image = gr.Image(type='pil', label="Image") - model = gr.Dropdown(MODELS, value=MODELS[0], label='CLIP Model') - with gr.Row(): - medium = gr.Label(label="Medium", num_top_classes=5) - artist = gr.Label(label="Artist", num_top_classes=5) - movement = gr.Label(label="Movement", num_top_classes=5) - trending = gr.Label(label="Trending", num_top_classes=5) - flavor = gr.Label(label="Flavor", num_top_classes=5) - - button = gr.Button("Analyze", api_name="image-analysis") - button.click(image_analysis, inputs=[image, model], outputs=[medium, artist, movement, trending, flavor]) - - examples=[['example01.jpg', MODELS[0]], ['example02.jpg', MODELS[0]]] - ex = gr.Examples( - examples=examples, - fn=image_analysis, - inputs=[input_image, input_model], - outputs=[medium, artist, movement, trending, flavor], - cache_examples=True, - run_on_click=True - ) - ex.dataset.headers = [""] - - -with gr.Blocks(css=CSS) as block: - with gr.Column(elem_id="col-container"): - gr.HTML(TITLE) - - with gr.Tab("Prompt"): - with gr.Row(): - input_image = gr.Image(type='pil', elem_id="input-img") - with gr.Column(): - input_model = gr.Dropdown(MODELS, value=MODELS[0], label='CLIP Model') - input_mode = gr.Radio(['best', 'fast', 'classic', 'negative'], value='best', label='Mode') - submit_btn = gr.Button("Submit", api_name="image-to-prompt") - output_text = gr.Textbox(label="Output", elem_id="output-txt") - - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html, visible=False) - loading_icon = gr.HTML(loading_icon_html, visible=False) - share_button = gr.Button("Share to community", elem_id="share-btn", visible=False) - - examples=[['example01.jpg', MODELS[0], 'best'], ['example02.jpg', MODELS[0], 'best']] - ex = gr.Examples( - examples=examples, - fn=image_to_prompt, - inputs=[input_image, input_model, input_mode], - outputs=[output_text, share_button, community_icon, loading_icon], - cache_examples=True, - run_on_click=True - ) - ex.dataset.headers = [""] - - with gr.Tab("Analyze"): - analyze_tab() - - gr.HTML(ARTICLE) - - submit_btn.click( - fn=image_to_prompt, - inputs=[input_image, input_model, input_mode], - outputs=[output_text, share_button, community_icon, loading_icon] - ) - share_button.click(None, [], [], _js=share_js) - -block.queue(max_size=64).launch(show_api=False) diff --git a/spaces/matthoffner/AudioCraft_Plus/audiocraft/grids/musicgen/musicgen_melody_32khz.py b/spaces/matthoffner/AudioCraft_Plus/audiocraft/grids/musicgen/musicgen_melody_32khz.py deleted 
file mode 100644 index b0d6710a23c117406e9724057a62eccab88ce907..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/AudioCraft_Plus/audiocraft/grids/musicgen/musicgen_melody_32khz.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from ._explorers import LMExplorer -from ...environment import AudioCraftEnvironment - - -@LMExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=32, partition=partitions) - launcher.bind_(solver='musicgen/musicgen_melody_32khz') - # replace this by the desired music dataset - launcher.bind_(dset='internal/music_400k_32khz') - - fsdp = {'autocast': False, 'fsdp.use': True} - medium = {'model/lm/model_scale': 'medium'} - large = {'model/lm/model_scale': 'large'} - - cfg_low = {'classifier_free_guidance.training_dropout': 0.2} - wd_low = {'conditioners.description.t5.word_dropout': 0.2} - - adam = {'optim.optimizer': 'adamw', 'optim.lr': 1e-4} - - cache_path = {'conditioners.self_wav.chroma_stem.cache_path': - '/fsx-audio-craft-llm/jadecopet/experiments/audiocraft/caches/chroma_stem'} - - # CACHE GENERATION JOBS - n_cache_gen_jobs = 4 - gen_sub = launcher.slurm(gpus=1) - gen_sub.bind_( - cache_path, { - # the cache is always computed over the whole file, so duration doesn't matter here. - 'dataset.segment_duration': 2., - 'dataset.batch_size': 8, - 'dataset.train.permutation_on_files': True, # try to not repeat files. - 'optim.epochs': 10, - 'model/lm/model_scale': 'xsmall', - - }) - with gen_sub.job_array(): - for gen_job in range(n_cache_gen_jobs): - gen_sub({'dataset.train.shuffle_seed': gen_job}) - - # ACTUAL TRAINING JOBS. - launcher.bind_(fsdp) - - launcher.slurm_(gpus=32).bind_(label='32gpus') - with launcher.job_array(): - sub = launcher.bind() - sub() - sub(cache_path) - - launcher.slurm_(gpus=64).bind_(label='64gpus') - with launcher.job_array(): - sub = launcher.bind() - sub(medium, adam) - - launcher.slurm_(gpus=96).bind_(label='96gpus') - with launcher.job_array(): - sub = launcher.bind() - sub(large, cfg_low, wd_low, adam, {'optim.max_norm': 3}) diff --git a/spaces/mayordp/DeepFakeAI/DeepFakeAI/processors/frame/core.py b/spaces/mayordp/DeepFakeAI/DeepFakeAI/processors/frame/core.py deleted file mode 100644 index 8a44cb2413b53b88dec2d65667ef0e8b2fe11e72..0000000000000000000000000000000000000000 --- a/spaces/mayordp/DeepFakeAI/DeepFakeAI/processors/frame/core.py +++ /dev/null @@ -1,113 +0,0 @@ -import os -import sys -import importlib -import psutil -from concurrent.futures import ThreadPoolExecutor, as_completed -from queue import Queue -from types import ModuleType -from typing import Any, List, Callable -from tqdm import tqdm - -import DeepFakeAI.globals -from DeepFakeAI import wording - -FRAME_PROCESSORS_MODULES : List[ModuleType] = [] -FRAME_PROCESSORS_METHODS =\ -[ - 'get_frame_processor', - 'clear_frame_processor', - 'pre_check', - 'pre_process', - 'process_frame', - 'process_frames', - 'process_image', - 'process_video', - 'post_process' -] - - -def load_frame_processor_module(frame_processor : str) -> Any: - try: - frame_processor_module = importlib.import_module('DeepFakeAI.processors.frame.modules.' 
+ frame_processor) - for method_name in FRAME_PROCESSORS_METHODS: - if not hasattr(frame_processor_module, method_name): - raise NotImplementedError - except ModuleNotFoundError: - sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor)) - except NotImplementedError: - sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor)) - return frame_processor_module - - -def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]: - global FRAME_PROCESSORS_MODULES - - if not FRAME_PROCESSORS_MODULES: - for frame_processor in frame_processors: - frame_processor_module = load_frame_processor_module(frame_processor) - FRAME_PROCESSORS_MODULES.append(frame_processor_module) - return FRAME_PROCESSORS_MODULES - - -def clear_frame_processors_modules() -> None: - global FRAME_PROCESSORS_MODULES - - for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): - frame_processor_module.clear_frame_processor() - FRAME_PROCESSORS_MODULES = [] - - -def multi_process_frame(source_path : str, temp_frame_paths : List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None: - with ThreadPoolExecutor(max_workers = DeepFakeAI.globals.execution_thread_count) as executor: - futures = [] - queue = create_queue(temp_frame_paths) - queue_per_future = max(len(temp_frame_paths) // DeepFakeAI.globals.execution_thread_count * DeepFakeAI.globals.execution_queue_count, 1) - while not queue.empty(): - future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update) - futures.append(future) - for future in as_completed(futures): - future.result() - - -def create_queue(temp_frame_paths : List[str]) -> Queue[str]: - queue: Queue[str] = Queue() - for frame_path in temp_frame_paths: - queue.put(frame_path) - return queue - - -def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]: - queues = [] - for _ in range(queue_per_future): - if not queue.empty(): - queues.append(queue.get()) - return queues - - -def process_video(source_path : str, frame_paths : List[str], process_frames : Callable[[str, List[str], Any], None]) -> None: - progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]' - total = len(frame_paths) - with tqdm(total = total, desc = wording.get('processing'), unit = 'frame', dynamic_ncols = True, bar_format = progress_bar_format) as progress: - multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress)) - - -def update_progress(progress : Any = None) -> None: - process = psutil.Process(os.getpid()) - memory_usage = process.memory_info().rss / 1024 / 1024 / 1024 - progress.set_postfix( - { - 'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB', - 'execution_providers': DeepFakeAI.globals.execution_providers, - 'execution_thread_count': DeepFakeAI.globals.execution_thread_count, - 'execution_queue_count': DeepFakeAI.globals.execution_queue_count - }) - progress.refresh() - progress.update(1) - - -def get_device() -> str: - if 'CUDAExecutionProvider' in DeepFakeAI.globals.execution_providers: - return 'cuda' - if 'CoreMLExecutionProvider' in DeepFakeAI.globals.execution_providers: - return 'mps' - return 'cpu' diff --git a/spaces/merve/measuring-fairness/public/anonymization/style-graph-scroll.css b/spaces/merve/measuring-fairness/public/anonymization/style-graph-scroll.css deleted file mode 100644 index 
7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/public/anonymization/style-graph-scroll.css +++ /dev/null @@ -1,160 +0,0 @@ -/** { border: 1px solid #f00; }*/ - - -#container{ - position: relative; - width: auto; - margin-left: -25px; - /*margin-bottom: 100px;*/ -} - -#sections{ - width: 330px; - pointer-events: none; -} - -#sections > div{ - background: white; - opacity: .2; - margin-bottom: 400px; - line-height: 1.4em; - transition: opacity .2s; - pointer-events: all; -} -#sections > div:last-child{ - height: 480px; - margin-bottom: 0px; -} -#sections > div.graph-scroll-active{ - opacity: 1; -} - -#graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 580px; -} - -.slider-outer { - display: block; - max-width: 300px; -} - -@media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - #graph{ - width: 100%; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - left:12px; - } - - #sections{ - width: auto; - position: relative; - margin: 0px auto; - } - - #sections > div{ - background: rgba(255,255,255,.8); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -260px; - } - - #sections > div:last-child{ - height: auto; - } - - #sections h3{ - margin-top: .5em; - } - - /* Adjust buttons for mobile. */ - - .button-container{ - text-align: center; - left:0px; - } - - /* Adjust sliders for mobile. */ - input[type="range" i] { - width: 280px; - } - .slider-label-container{ - width: 145px; - /* display: inline-block; */ - } - - .slide-container-heads-prob, .slide-container-population { - text-align: center; - } - - .slider-container { - margin-bottom: 5px; - text-align: center; - width: 300px; - /* display:inline-block; */ - } - - .slider-outer { - text-align: center; - display: flex; - max-width: 300px; - } - - .headsProb, .population { - margin-left: 15px; - } - - .slide-container-population { - margin-bottom: -10px; - } - - .pointer div { - left: 10px; - top: 37px; - } - - /* Adjust post summary test for mobile. 
*/ - .post-summary{ - margin-left: 8px; - margin-bottom: 60px; - margin-top: 40px; - } - -} - -#graph > div{ - margin: 20 35px; -} - - -#end{ - height: 15vh; -} - diff --git a/spaces/merve/uncertainty-calibration/public/fill-in-the-blank/data/cachekey2filename.js b/spaces/merve/uncertainty-calibration/public/fill-in-the-blank/data/cachekey2filename.js deleted file mode 100644 index 85df2a5b1806c3853f4e12ab05b430af77c800f9..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/fill-in-the-blank/data/cachekey2filename.js +++ /dev/null @@ -1,19 +0,0 @@ -window.cacheKey2filename = { - "{\"tokens\":[101,2000,2022,2030,2025,2000,2022,29623,2008,2003,1996,3160,29628,102]}embed_group_top": "tokens-101-2000-2022-2030-2025-2000-2022-29623-2008-2003-1996-3160-29628-102-embed-group-top.json", - "{\"sentence\":\"In New York, they like to buy [MASK].\"}embed": "sentence-in-new-york-they-like-to-buy-mask-embed.json", - "{\"sentence\":\"Elsie was born in the year of [MASK].\"}embed": "sentence-elsie-was-born-in-the-year-of-mask-embed.json", - "{\"sentence\":\"Jim worked as a [MASK].\"}embed": "sentence-jim-worked-as-a-mask-embed.json", - "{\"sentence\":\"The new nurse was named [MASK].\"}embed": "sentence-the-new-nurse-was-named-mask-embed.json", - "{\"sentence\":\"The doctor performed CPR even though [MASK] knew it was too late.\"}embed_zari_cda": "sentence-the-doctor-performed-cpr-even-though-mask-knew-it-was-too-late-embed-zari-cda.json", - "{\"sentence\":\"In 1908, he was employed as a [MASK].\"}embed": "sentence-in-1908-he-was-employed-as-a-mask-embed.json", - "{\"sentence\":\"Jane worked as a [MASK].\"}embed": "sentence-jane-worked-as-a-mask-embed.json", - "{\"sentence\":\"In Texas, they like to buy [MASK].\"}embed": "sentence-in-texas-they-like-to-buy-mask-embed.json", - "{\"sentence\":\"Lauren was born in the year of [MASK].\"}embed": "sentence-lauren-was-born-in-the-year-of-mask-embed.json", - "{\"sentence\":\"The new doctor was named [MASK].\"}embed": "sentence-the-new-doctor-was-named-mask-embed.json", - "{\"sentence\":\"The nurse performed CPR even though [MASK] knew it was too late.\"}embed_zari_cda": "sentence-the-nurse-performed-cpr-even-though-mask-knew-it-was-too-late-embed-zari-cda.json", - "{\"sentence\":\"In 1908, she was employed as a [MASK].\"}embed": "sentence-in-1908-she-was-employed-as-a-mask-embed.json", - "{\"sentence\":\"In 2018, he was employed as a [MASK].\"}embed": "sentence-in-2018-he-was-employed-as-a-mask-embed.json", - "{\"sentence\":\"In 2018, she was employed as a [MASK].\"}embed": "sentence-in-2018-she-was-employed-as-a-mask-embed.json", - "{\"tokens\":[101,1999,2047,2259,29623,2027,2066,2000,4965,2477,29625,102]}embed_group_top": "tokens-101-1999-2047-2259-29623-2027-2066-2000-4965-2477-29625-102-embed-group-top.json", - "{\"tokens\":[101,1999,3146,29623,2027,2066,2000,4965,2477,29625,102]}embed_group_top": "tokens-101-1999-3146-29623-2027-2066-2000-4965-2477-29625-102-embed-group-top.json" -} \ No newline at end of file diff --git a/spaces/merve/uncertainty-calibration/source/_posts/2021-03-03-fill-in-the-blank.md b/spaces/merve/uncertainty-calibration/source/_posts/2021-03-03-fill-in-the-blank.md deleted file mode 100644 index c5a251a9297e84f8b3ed4e504ff25f19793a57c2..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/source/_posts/2021-03-03-fill-in-the-blank.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -template: post.html -title: What Have Language Models Learned? 
-summary: By asking language models to fill in the blank, we can probe their understanding of the world. -shareimg: https://pair.withgoogle.com/explorables/images/fill-in-the-blank.png -shareimgabstract: https://pair.withgoogle.com/explorables/images/fill-in-the-blank-abstract.png -permalink: /fill-in-the-blank/ -date: 2021-07-28 ---- - -Large language models are making it possible for computers to [write stories](https://openai.com/blog/better-language-models/), [program a website](https://twitter.com/sharifshameem/status/1282676454690451457) and [turn captions into images](https://openai.com/blog/dall-e/). - -One of the first of these models, [BERT](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html), is trained by taking sentences, splitting them into individual words, randomly hiding some of them, and predicting what the hidden words are. After doing this millions of times, BERT has "read" enough Shakespeare to predict how this phrase usually ends: - -
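If you'd rather poke at this outside the interactive widget, here is a minimal sketch of the same fill-in-the-blank probing with the Hugging Face `pipeline` API — the model name comes from the footnotes below, and the Hamlet prompt and output formatting are only illustrative:

```python
from transformers import pipeline

# Fill-mask pipeline with the BERT variant named in the footnotes.
# bert-large is a fairly big download; bert-base-uncased also works for a quick test.
fill = pipeline("fill-mask", model="bert-large-uncased-whole-word-masking")

# BERT's mask token is literally the string "[MASK]".
for pred in fill("To be or not to be, that is the [MASK]."):
    print(f"{pred['token_str']:>12}  p={pred['score']:.3f}")
```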
      - -This page is hooked up to a version of BERT trained on Wikipedia and books.¹ Try clicking on different words to see how they'd be filled in or typing in another sentence to see what else has BERT picked up on. - -
      - -### Cattle or Clothes? - -Besides Hamlet's existential dread, the text BERT was trained on also contains more patterns: - -
      - -Cattle and horses aren't top purchase predictions in every state, though! In New York, some of the most likely words are clothes, books and art: - -
      - -There are more than 30,000 words, punctuation marks and word fragments in BERT's [vocabulary](https://huggingface.co/transformers/tokenizer_summary.html). Every time BERT fills in a hidden word, it assigns each of them a probability. By looking at how slightly different sentences shift those probabilities, we can get a glimpse at how purchasing patterns in different places are understood. - -
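As a rough sketch of what that probing looks like in code — using two of the example sentences from this piece and a hand-picked list of target words that is purely illustrative — the fill-mask pipeline can score specific candidate tokens directly:

```python
from transformers import pipeline

fill = pipeline("fill-mask", model="bert-large-uncased-whole-word-masking")

# Hand-picked target words for illustration; the interactive chart above
# compares probabilities over the full ~30,000-token vocabulary instead.
targets = ["clothes", "books", "art", "cattle", "horses"]

for sentence in ("In New York, they like to buy [MASK].",
                 "In Texas, they like to buy [MASK]."):
    print(sentence)
    for pred in fill(sentence, targets=targets):
        print(f"  {pred['token_str']:>8}  p={pred['score']:.4f}")
```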
      - -You can **edit these sentences**. Or try one of these comparisons to get started: - -To the extent that a computer program can "know" something, what does BERT know about where you live? -### What's in a Name? - -This technique can also probe what associations BERT has learned about different groups of people. For example, it predicts people named Elsie are older than people named Lauren: - -
      - -It's also learned that people named Jim have more [typically masculine](https://flowingdata.com/2017/09/11/most-female-and-male-occupations-since-1950/) jobs than people named Jane: - -
      - -These aren't just spurious correlations — Elsies really are more likely to be [older](https://rhiever.github.io/name-age-calculator/) than Laurens. And occupations the model associates with feminine names are held by a [higher percentage](https://purehost.bath.ac.uk/ws/portalfiles/portal/168480066/CaliskanEtAl_authors_full.pdf ) of women. - -Should we be concerned about these correlations? BERT was trained to fill in blanks in Wikipedia articles and books — it does a great job at that! The problem is that the internal representations of language these models have learned are used for much more – by some [measures](https://super.gluebenchmark.com/leaderboard), they're the best way we have of getting computers to understand and manipulate text. - -We wouldn't hesitate to call a conversation partner or recruiter who blithely assumed that doctors are men sexist, but that's exactly what BERT might do if heedlessly incorporated into a chatbot or HR software: - -
      - -Adjusting for assumptions like this isn't trivial. *Why* machine learning systems produce a given output still isn't well understood – determining if a credit model built on top of BERT rejected a loan application because of [gender discrimation](https://pair.withgoogle.com/explorables/hidden-bias/) might be quite difficult. - -Deploying large language models at scale also risks [amplifying](https://machinesgonewrong.com/bias_i/#harms-of-representation) and [perpetuating](http://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf) today's harmful stereotypes. When [prompted](https://arxiv.org/pdf/2101.05783v1.pdf#page=3) with "Two Muslims walked into a…", for example, [GPT-3](https://en.wikipedia.org/wiki/GPT-3) typically finishes the sentence with descriptions of violence. -### How Can We Fix This? - -One conceptually straightforward approach: reduce unwanted correlations from the training data to [mitigate](https://arxiv.org/abs/1906.08976) model [bias](https://arxiv.org/abs/2005.14050). - -Last year a version of BERT called [Zari](https://ai.googleblog.com/2020/10/measuring-gendered-correlations-in-pre.html) was [trained](https://arxiv.org/pdf/2010.06032.pdf#page=6) with an additional set of generated sentences. For every sentence with a [gendered noun](https://github.com/uclanlp/corefBias/blob/master/WinoBias/wino/generalized_swaps.txt), like boy or aunt, another sentence that replaced the noun with its gender-partner was added to the training data: in addition to "The *lady* doth protest too much," Zari was also trained on "The *gentleman* doth protest too much." - -
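A toy sketch of that counterfactual augmentation step is below. The word list is a tiny illustrative subset standing in for the full gendered-noun list linked above, and real pipelines have to handle trickier cases (for instance, "her" can map to either "his" or "him"):

```python
import re

# Tiny illustrative subset of gendered word pairs; the real list is much longer.
PAIRS = {"lady": "gentleman", "boy": "girl", "aunt": "uncle", "she": "he"}
SWAPS = {**PAIRS, **{v: k for k, v in PAIRS.items()}}

def swap_gendered_words(sentence: str) -> str:
    """Return the sentence with each gendered word replaced by its partner."""
    def repl(match: re.Match) -> str:
        word = match.group(0)
        swapped = SWAPS.get(word.lower(), word)
        return swapped.capitalize() if word[0].isupper() else swapped
    return re.sub(r"[A-Za-z]+", repl, sentence)

original = "The lady doth protest too much."
print(original)                       # The lady doth protest too much.
print(swap_gendered_words(original))  # The gentleman doth protest too much.
```

In the Zari setup described above, both the original sentence and its swapped counterpart go into the training data, so the model sees each context with both gendered forms.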
      - -Unlike BERT, Zari assigns nurses and doctors an equal probability of being a "she" or a "he" after being trained on the swapped sentences. This approach hasn't removed all the gender correlations; because names weren't swapped, Zari's association between masculine names and doctors has only slightly decreased from BERT's. And the retraining doesn't change how the model understands nonbinary gender. - -Something similar happened with [other attempts](https://arxiv.org/abs/1607.06520) to remove gender bias from models' representations of words. It's possible to mathematically define bias and perform "brain surgery" on a model to remove it, but language is steeped in gender. Large models can have billions of parameters in which to learn stereotypes — slightly different measures of bias have found the retrained models only [shifted the stereotypes](https://www.aclweb.org/anthology/N19-1061/) around to be undetectable by the initial measure. - -As with [other applications](https://pair.withgoogle.com/explorables/measuring-fairness/) of machine learning, it's helpful to focus instead on the actual harms that could occur. Tools like [AllenNLP](https://allennlp.org/), [LMdiff](http://lmdiff.net/) and the [Language Interpretability Tool](https://pair-code.github.io/lit/) make it easier to interact with language models to find where they might be falling short. Once those shortcomings are spotted, [task specific](https://arxiv.org/abs/2004.07667) mitigation measures can be simpler to apply than modifying the entire model. - -It's also possible that as models grow more capable, they might be able to [explain](https://arxiv.org/abs/2004.14546) and perform some of this debiasing themselves. Instead of forcing the model to tell us the gender of "the doctor," we could let it respond with [uncertainty](https://arr.am/2020/07/25/gpt-3-uncertainty-prompts/) that's [shown to the user](https://ai.googleblog.com/2018/12/providing-gender-specific-translations.html) and controls to override assumptions. - -### Credits - -Adam Pearce // July 2021 - -Thanks to Ben Wedin, Emily Reif, James Wexler, Fernanda Viégas, Ian Tenney, Kellie Webster, Kevin Robinson, Lucas Dixon, Ludovic Peran, Martin Wattenberg, Michael Terry, Tolga Bolukbasi, Vinodkumar Prabhakaran, Xuezhi Wang, Yannick Assogba, and Zan Armstrong for their help with this piece. - -### Footnotes - - The BERT model used on this page is the Hugging Face version of [bert-large-uncased-whole-word-masking](https://huggingface.co/bert-large-uncased-whole-word-masking). "BERT" also refers to a type of model architecture; hundreds of BERT models have been [trained and published](https://huggingface.co/models?filter=bert). The model and chart code used here are available on [GitHub](https://github.com/PAIR-code/ai-explorables). - - Notice that "1800", "1900" and "2000" are some of the top predictions, though. People aren't actually more likely to be born at the start of a century, but in BERT's training corpus of books and Wikipedia articles round numbers are [more common](https://blocks.roadtolarissa.com/1wheel/cea123a8c17d51d9dacbd1c17e6fe601).

      - -Comparing BERT and Zari in this interface requires carefully tracking tokens during a transition. The [BERT Difference Plots](https://colab.research.google.com/drive/1xfPGKqjdE635cVSi-Ggt-cRBU5pyJNWP) colab has ideas for extensions to systemically look at differences between the models' output. - - This analysis shouldn't stop once a model is deployed — as language and model usage shifts, it's important to continue studying and mitigating potential harms. - - -### Appendix: Differences Over Time - -In addition to looking at how predictions for men and women are different for a given sentence, we can also chart how those differences have changed over time: - -
      - -The convergence in more recent years suggests another potential mitigation technique: using a prefix to steer the model away from unwanted correlations while preserving its understanding of natural language. - -Using "In $year" as the prefix is quite limited, though, as it doesn't handle gender-neutral pronouns and potentially [increases](https://www.pnas.org/content/pnas/115/16/E3635.full.pdf#page=8) other correlations. However, it may be possible to [find a better prefix](https://arxiv.org/abs/2104.08691) that mitigates a specific type of bias with just a [couple of dozen examples](https://www.openai.com/blog/improving-language-model-behavior/ ). - -
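As a rough illustration of the prefix idea (a sketch using the same Hugging Face checkpoint as above, not the code behind these charts), the year prefix can simply be prepended to a probe sentence before asking the model to fill in the pronoun:

```python
# Compare pronoun scores under different "In $year" prefixes (illustrative sketch).
from transformers import pipeline

fill = pipeline("fill-mask", model="bert-large-uncased-whole-word-masking")

for prefix in ["In 1908,", "In 2018,"]:
    sentence = f"{prefix} the doctor said [MASK] would be right back."
    scores = {r["token_str"]: round(r["score"], 3) for r in fill(sentence, targets=["he", "she"])}
    print(prefix, scores)
```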
      - -Closer examination of these differences in differences also shows there's a limit to the facts we can pull out of BERT this way. - -Below, the top row of charts shows how predicted differences in occupations between men and women change between 1908 and 2018. The rightmost chart shows the he/she difference in 1908 against the he/she difference in 2018. - -The flat slope of the rightmost chart indicates that the he/she difference has decreased for each job by about the same amount. But in reality, [shifts in occupation](https://www.weforum.org/agenda/2016/03/a-visual-history-of-gender-and-employment) weren't nearly so smooth and some occupations, like accounting, switched from being majority male to majority female. - -
      - -This reality-prediction mismatch could be caused by lack of training data, model size or the coarseness of the probing method. There's an immense amount of general knowledge inside of these models — with a little bit of focused training, they can even become expert [trivia](https://t5-trivia.glitch.me/) players. -### More Explorables - -

      - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/merve/uncertainty-calibration/source/measuring-fairness/slides.js b/spaces/merve/uncertainty-calibration/source/measuring-fairness/slides.js deleted file mode 100644 index a66a04c7c483fee37424c6e9182e565a673a7aca..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/source/measuring-fairness/slides.js +++ /dev/null @@ -1,102 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - - -window.makeSlides = function(){ - var slides = [ - { - textFill: '#aaa', - textStroke: 0, - rectFill: d => d.isSick ? lcolors.sick : lcolors.well, - rectOpacity: d => 0, - threshold: .8, - fpAxisOpacity: 0, - sexAxisOpacity: 0, - brAxisOpacity: 0, - truthAxisOpacity: 0, - mlAxisOpacity: 0, - pos: 'all', - botAxisY: c.width + 80, - }, - - { - textFill: d => d.isSick ? colors.sick : colors.well, - truthAxisOpacity: 1, - }, - - { - rectOpacity: d => 1, - mlAxisOpacity: 1, - - }, - - { - rectFill: d => d.grade > gs.curSlide.threshold ? lcolors.sick : lcolors.well, - textStroke: d => d.grade > gs.curSlide.threshold == d.isSick ? 0 : .6, - fpAxisOpacity: 1, - }, - - { - threshold: .61, - animateThreshold: true, - }, - - { - threshold: .89, - animateThreshold: true, - }, - - { - pos: 'sex', - fpAxisOpacity: 0, - sexAxisOpacity: 1, - threshold: .7508, - animateThreshold: false, - botAxisY: c.width + 150, - - }, - - { - brAxisOpacity: 1, - sexAxisOpacity: 0, - - }, - - { - - } - - ] - - var keys = [] - slides.forEach(d => keys = keys.concat(d3.keys(d))) - _.uniq(keys).forEach(str => { - var prev = null - slides.forEach(d => { - if (typeof(d[str]) === 'undefined'){ - d[str] = prev - } - prev = d[str] - }) - }) - - return slides -} - - - -if (window.init) window.init() diff --git a/spaces/mithril-security/Santacoder-demo/README.md b/spaces/mithril-security/Santacoder-demo/README.md deleted file mode 100644 index 59396fe839fbebd349fb7b7c993dce831ea8b52e..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/Santacoder-demo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Santacoder Demo -emoji: 📉 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/miyaaa666/bingo/src/components/chat-scroll-anchor.tsx b/spaces/miyaaa666/bingo/src/components/chat-scroll-anchor.tsx deleted file mode 100644 index ac809f4486a48e134cb69314c3d0dae5e68d614e..0000000000000000000000000000000000000000 --- a/spaces/miyaaa666/bingo/src/components/chat-scroll-anchor.tsx +++ /dev/null @@ -1,29 +0,0 @@ -'use client' - -import * as React from 'react' -import { useInView } from 'react-intersection-observer' - -import { useAtBottom } from '@/lib/hooks/use-at-bottom' - -interface ChatScrollAnchorProps { - trackVisibility?: 
boolean -} - -export function ChatScrollAnchor({ trackVisibility }: ChatScrollAnchorProps) { - const isAtBottom = useAtBottom() - const { ref, entry, inView } = useInView({ - trackVisibility, - delay: 100, - rootMargin: '0px 0px -150px 0px' - }) - - React.useEffect(() => { - if (isAtBottom && trackVisibility && !inView) { - entry?.target.scrollIntoView({ - block: 'start' - }) - } - }, [inView, entry, isAtBottom, trackVisibility]) - - return
      -} diff --git a/spaces/mmnga/vocabviewer/app.py b/spaces/mmnga/vocabviewer/app.py deleted file mode 100644 index e9f1d13a11c77c4b4ecf3e8cc76a3d45ebfc9b3f..0000000000000000000000000000000000000000 --- a/spaces/mmnga/vocabviewer/app.py +++ /dev/null @@ -1,52 +0,0 @@ - -def vocab(data,path, length): - name = "" - size = 0 - with open(path) as f: - _, size, name = f.readline().split("\t") - f.readline() - column = f"{name}({size})" - data[column] = [] - for i in range(int(size)): - [text] = f.readline().split("\t")[-1:] - data[column].append(text.rstrip()) - - for i in range(length-int(size)): - data[column].append("") - -data = {} -vocab(data,"vocab_00.txt", 65536) -vocab(data,"vocab_01.txt", 65536) -vocab(data,"vocab_02.txt", 65536) -vocab(data,"vocab_03.txt", 65536) -vocab(data,"vocab_04.txt", 65536) -vocab(data,"vocab_05.txt", 65536) -vocab(data,"vocab_08.txt", 65536) - -# data_large = {} -# vocab(data_large,"vocab_06_qwen.txt", 151643) -# vocab(data_large,"vocab_07_cl100k_base.txt", 151643) - -# data = {} -# vocab(data,"vocab_00.txt", 151643) -# vocab(data,"vocab_01.txt", 151643) -# vocab(data,"vocab_02.txt", 151643) -# vocab(data,"vocab_03.txt", 151643) -# vocab(data,"vocab_04.txt", 151643) -# vocab(data,"vocab_05.txt", 151643) - -# vocab(data,"vocab_07_cl100k_base.txt", 151643) -# vocab(data,"vocab_06_qwen.txt", 151643) - -# print(data) -import streamlit as st -import pandas as pd -st.title("jp vocab viewer") -st.text("""日本語LLM各種のvocabの一覧です。 This is a list of Japanese LLM vocab. -(stablelm-jpのtokenizerはnovelai/nerdstash-tokenizer-v1を使用しています。)""") - -df = pd.DataFrame(data) -st.dataframe(df, height=800, width=1000) - -# df2 = pd.DataFrame(data_large) -# st.dataframe(df2, height=800) diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_synthesis/preprocessing/speaker_embedder/__init__.py b/spaces/mshukor/UnIVAL/fairseq/examples/speech_synthesis/preprocessing/speaker_embedder/__init__.py deleted file mode 100644 index 3b178676ba322ef613df42977cb498101f841b09..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/speech_synthesis/preprocessing/speaker_embedder/__init__.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -import librosa -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.data -import torchaudio - - -EMBEDDER_PARAMS = { - 'num_mels': 40, - 'n_fft': 512, - 'emb_dim': 256, - 'lstm_hidden': 768, - 'lstm_layers': 3, - 'window': 80, - 'stride': 40, -} - - -def set_requires_grad(nets, requires_grad=False): - """Set requies_grad=Fasle for all the networks to avoid unnecessary - computations - Parameters: - nets (network list) -- a list of networks - requires_grad (bool) -- whether the networks require gradients or not - """ - if not isinstance(nets, list): - nets = [nets] - for net in nets: - if net is not None: - for param in net.parameters(): - param.requires_grad = requires_grad - - -class LinearNorm(nn.Module): - def __init__(self, hp): - super(LinearNorm, self).__init__() - self.linear_layer = nn.Linear(hp["lstm_hidden"], hp["emb_dim"]) - - def forward(self, x): - return self.linear_layer(x) - - -class SpeechEmbedder(nn.Module): - def __init__(self, hp): - super(SpeechEmbedder, self).__init__() - self.lstm = nn.LSTM(hp["num_mels"], - hp["lstm_hidden"], - num_layers=hp["lstm_layers"], - batch_first=True) - self.proj = LinearNorm(hp) - self.hp = hp - - def forward(self, mel): - # (num_mels, T) -> (num_mels, T', window) - mels = mel.unfold(1, self.hp["window"], self.hp["stride"]) - mels = mels.permute(1, 2, 0) # (T', window, num_mels) - x, _ = self.lstm(mels) # (T', window, lstm_hidden) - x = x[:, -1, :] # (T', lstm_hidden), use last frame only - x = self.proj(x) # (T', emb_dim) - x = x / torch.norm(x, p=2, dim=1, keepdim=True) # (T', emb_dim) - - x = x.mean(dim=0) - if x.norm(p=2) != 0: - x = x / x.norm(p=2) - return x - - -class SpkrEmbedder(nn.Module): - RATE = 16000 - - def __init__( - self, - embedder_path, - embedder_params=EMBEDDER_PARAMS, - rate=16000, - hop_length=160, - win_length=400, - pad=False, - ): - super(SpkrEmbedder, self).__init__() - embedder_pt = torch.load(embedder_path, map_location="cpu") - self.embedder = SpeechEmbedder(embedder_params) - self.embedder.load_state_dict(embedder_pt) - self.embedder.eval() - set_requires_grad(self.embedder, requires_grad=False) - self.embedder_params = embedder_params - - self.register_buffer('mel_basis', torch.from_numpy( - librosa.filters.mel( - sr=self.RATE, - n_fft=self.embedder_params["n_fft"], - n_mels=self.embedder_params["num_mels"]) - ) - ) - - self.resample = None - if rate != self.RATE: - self.resample = torchaudio.transforms.Resample(rate, self.RATE) - self.hop_length = hop_length - self.win_length = win_length - self.pad = pad - - def get_mel(self, y): - if self.pad and y.shape[-1] < 14000: - y = F.pad(y, (0, 14000 - y.shape[-1])) - - window = torch.hann_window(self.win_length).to(y) - y = torch.stft(y, n_fft=self.embedder_params["n_fft"], - hop_length=self.hop_length, - win_length=self.win_length, - window=window) - magnitudes = torch.norm(y, dim=-1, p=2) ** 2 - mel = torch.log10(self.mel_basis @ magnitudes + 1e-6) - return mel - - def forward(self, inputs): - dvecs = [] - for wav in inputs: - mel = self.get_mel(wav) - if mel.dim() == 3: - mel = mel.squeeze(0) - dvecs += [self.embedder(mel)] - dvecs = torch.stack(dvecs) - - dvec = torch.mean(dvecs, dim=0) - dvec = dvec / torch.norm(dvec) - - return dvec diff --git a/spaces/mthsk/sovits-models-misc/inference_main.py b/spaces/mthsk/sovits-models-misc/inference_main.py deleted file mode 100644 index 3b2c32ac9e29e6b016e656e937fede5d2c23e7e6..0000000000000000000000000000000000000000 --- a/spaces/mthsk/sovits-models-misc/inference_main.py 
+++ /dev/null @@ -1,130 +0,0 @@ -import io -import logging -import time -from pathlib import Path - -import librosa -import matplotlib.pyplot as plt -import numpy as np -import soundfile - -from inference import infer_tool -from inference import slicer -from inference.infer_tool import Svc - -logging.getLogger('numba').setLevel(logging.WARNING) -chunks_dict = infer_tool.read_temp("inference/chunks_temp.json") - - - -def main(): - import argparse - - parser = argparse.ArgumentParser(description='sovits4 inference') - - # 一定要设置的部分 - parser.add_argument('-m', '--model_path', type=str, default="logs/44k/G_0.pth", help='模型路径') - parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='配置文件路径') - parser.add_argument('-cl', '--clip', type=float, default=0, help='音频强制切片,默认0为自动切片,单位为秒/s') - parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src.wav"], help='wav文件名列表,放在raw文件夹下') - parser.add_argument('-t', '--trans', type=int, nargs='+', default=[0], help='音高调整,支持正负(半音)') - parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['nen'], help='合成目标说话人名称') - - # 可选项部分 - parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=False,help='语音转换自动预测音高,转换歌声时不要打开这个会严重跑调') - parser.add_argument('-cm', '--cluster_model_path', type=str, default="logs/44k/kmeans_10000.pt", help='聚类模型路径,如果没有训练聚类则随便填') - parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='聚类方案占比,范围0-1,若没有训练聚类模型则默认0即可') - parser.add_argument('-lg', '--linear_gradient', type=float, default=0, help='两段音频切片的交叉淡入长度,如果强制切片后出现人声不连贯可调整该数值,如果连贯建议采用默认值0,单位为秒') - parser.add_argument('-fmp', '--f0_mean_pooling', type=bool, default=False, help='是否对F0使用均值滤波器(池化),对部分哑音有改善。注意,启动该选项会导致推理速度下降,默认关闭') - - # 不用动的部分 - parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50') - parser.add_argument('-d', '--device', type=str, default=None, help='推理设备,None则为自动选择cpu和gpu') - parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='噪音级别,会影响咬字和音质,较为玄学') - parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='推理音频pad秒数,由于未知原因开头结尾会有异响,pad一小段静音段后就不会出现') - parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='音频输出格式') - parser.add_argument('-lgr', '--linear_gradient_retain', type=float, default=0.75, help='自动音频切片后,需要舍弃每段切片的头尾。该参数设置交叉长度保留的比例,范围0-1,左开右闭') - - args = parser.parse_args() - - svc_model = Svc(args.model_path, args.config_path, args.device, args.cluster_model_path) - infer_tool.mkdir(["raw", "results"]) - clean_names = args.clean_names - trans = args.trans - spk_list = args.spk_list - slice_db = args.slice_db - wav_format = args.wav_format - auto_predict_f0 = args.auto_predict_f0 - cluster_infer_ratio = args.cluster_infer_ratio - noice_scale = args.noice_scale - pad_seconds = args.pad_seconds - clip = args.clip - lg = args.linear_gradient - lgr = args.linear_gradient_retain - F0_mean_pooling = args.f0_mean_pooling - - infer_tool.fill_a_to_b(trans, clean_names) - for clean_name, tran in zip(clean_names, trans): - raw_audio_path = f"raw/{clean_name}" - if "." 
not in raw_audio_path: - raw_audio_path += ".wav" - infer_tool.format_wav(raw_audio_path) - wav_path = Path(raw_audio_path).with_suffix('.wav') - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - per_size = int(clip*audio_sr) - lg_size = int(lg*audio_sr) - lg_size_r = int(lg_size*lgr) - lg_size_c_l = (lg_size-lg_size_r)//2 - lg_size_c_r = lg_size-lg_size_r-lg_size_c_l - lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0 - - for spk in spk_list: - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======') - - length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample)) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - audio.extend(list(infer_tool.pad_array(_audio, length))) - continue - if per_size != 0: - datas = infer_tool.split_list_by_n(data, per_size,lg_size) - else: - datas = [data] - for k,dat in enumerate(datas): - per_length = int(np.ceil(len(dat) / audio_sr * svc_model.target_sample)) if clip!=0 else length - if clip!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======') - # padd - pad_len = int(audio_sr * pad_seconds) - dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])]) - raw_path = io.BytesIO() - soundfile.write(raw_path, dat, audio_sr, format="wav") - raw_path.seek(0) - out_audio, out_sr = svc_model.infer(spk, tran, raw_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale, - F0_mean_pooling = F0_mean_pooling - ) - _audio = out_audio.cpu().numpy() - pad_len = int(svc_model.target_sample * pad_seconds) - _audio = _audio[pad_len:-pad_len] - _audio = infer_tool.pad_array(_audio, per_length) - if lg_size!=0 and k!=0: - lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr != 1 else audio[-lg_size:] - lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr != 1 else _audio[0:lg_size] - lg_pre = lg1*(1-lg)+lg2*lg - audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr != 1 else audio[0:-lg_size] - audio.extend(lg_pre) - _audio = _audio[lg_size_c_l+lg_size_r:] if lgr != 1 else _audio[lg_size:] - audio.extend(list(_audio)) - key = "auto" if auto_predict_f0 else f"{tran}key" - cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}" - res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}' - soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format) - -if __name__ == '__main__': - main() diff --git a/spaces/multimodalart/pix2pix-zero/src/utils/edit_directions.py b/spaces/multimodalart/pix2pix-zero/src/utils/edit_directions.py deleted file mode 100644 index 90a0f7e54e2cd498de89ae4ab2e4ea155586bf7b..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/pix2pix-zero/src/utils/edit_directions.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import torch - - -""" -This function takes in a task name and returns the direction in the embedding space that transforms class A to class B for the given task. - -Parameters: -task_name (str): name of the task for which direction is to be constructed. - -Returns: -torch.Tensor: A tensor representing the direction in the embedding space that transforms class A to class B. 
- -Examples: ->>> construct_direction("cat2dog") -""" -def construct_direction(task_name): - if task_name=="cat2dog": - emb_dir = f"assets/embeddings_sd_1.4" - embs_a = torch.load(os.path.join(emb_dir, f"cat.pt")) - embs_b = torch.load(os.path.join(emb_dir, f"dog.pt")) - return (embs_b.mean(0)-embs_a.mean(0)).unsqueeze(0) - elif task_name=="dog2cat": - emb_dir = f"assets/embeddings_sd_1.4" - embs_a = torch.load(os.path.join(emb_dir, f"dog.pt")) - embs_b = torch.load(os.path.join(emb_dir, f"cat.pt")) - return (embs_b.mean(0)-embs_a.mean(0)).unsqueeze(0) - else: - raise NotImplementedError - -def construct_direction_prompts(source_prompt_embeddings, target_prompt_embeddings): - return((target_prompt_embeddings.mean(0)-source_prompt_embeddings.mean(0)).unsqueeze(0)) \ No newline at end of file diff --git a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/postcss.config.js b/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/postcss.config.js deleted file mode 100644 index 064a2ba5ff820c6b2328f51f0ae6b147ec698881..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/postcss.config.js +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) Meta Platforms, Inc. and affiliates. -// All rights reserved. - -// This source code is licensed under the license found in the -// LICENSE file in the root directory of this source tree. - -const tailwindcss = require("tailwindcss"); -module.exports = { - plugins: ["postcss-preset-env", 'tailwindcss/nesting', tailwindcss], -}; diff --git a/spaces/nakamura196/yolov5-ndl-layout/ultralytics/yolov5/utils/aws/mime.sh b/spaces/nakamura196/yolov5-ndl-layout/ultralytics/yolov5/utils/aws/mime.sh deleted file mode 100644 index c319a83cfbdf09bea634c3bd9fca737c0b1dd505..0000000000000000000000000000000000000000 --- a/spaces/nakamura196/yolov5-ndl-layout/ultralytics/yolov5/utils/aws/mime.sh +++ /dev/null @@ -1,26 +0,0 @@ -# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ -# This script will run on every instance restart, not only on first start -# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- - -Content-Type: multipart/mixed; boundary="//" -MIME-Version: 1.0 - ---// -Content-Type: text/cloud-config; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="cloud-config.txt" - -#cloud-config -cloud_final_modules: -- [scripts-user, always] - ---// -Content-Type: text/x-shellscript; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="userdata.txt" - -#!/bin/bash -# --- paste contents of userdata.sh here --- ---// diff --git a/spaces/nateraw/jupyterlab-test2/Dockerfile b/spaces/nateraw/jupyterlab-test2/Dockerfile deleted file mode 100644 index 3960193ab63bdef0ffea33b509bc364b4c12b8e4..0000000000000000000000000000000000000000 --- a/spaces/nateraw/jupyterlab-test2/Dockerfile +++ /dev/null @@ -1,100 +0,0 @@ -FROM nvidia/cuda:11.3.1-base-ubuntu20.04 - -ENV DEBIAN_FRONTEND=noninteractive \ - TZ=Europe/Paris - -# Remove any third-party apt sources to avoid issues with expiring keys. 
-# Install some basic utilities -RUN rm -f /etc/apt/sources.list.d/*.list && \ - apt-get update && apt-get install -y --no-install-recommends \ - curl \ - ca-certificates \ - sudo \ - git \ - git-lfs \ - zip \ - unzip \ - htop \ - bzip2 \ - libx11-6 \ - build-essential \ - libsndfile-dev \ - software-properties-common \ - && rm -rf /var/lib/apt/lists/* - -RUN add-apt-repository ppa:flexiondotorg/nvtop && \ - apt-get upgrade -y && \ - apt-get install -y --no-install-recommends nvtop - -RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - && \ - apt-get install -y nodejs && \ - npm install -g configurable-http-proxy - -# Create a working directory -WORKDIR /app - -# Create a non-root user and switch to it -RUN adduser --disabled-password --gecos '' --shell /bin/bash user \ - && chown -R user:user /app -RUN echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-user -USER user - -# All users can use /home/user as their home directory -ENV HOME=/home/user -RUN mkdir $HOME/.cache $HOME/.config \ - && chmod -R 777 $HOME - -# Set up the Conda environment -ENV CONDA_AUTO_UPDATE_CONDA=false \ - PATH=$HOME/miniconda/bin:$PATH -RUN curl -sLo ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-py39_4.10.3-Linux-x86_64.sh \ - && chmod +x ~/miniconda.sh \ - && ~/miniconda.sh -b -p ~/miniconda \ - && rm ~/miniconda.sh \ - && conda clean -ya - -WORKDIR $HOME/app - -####################################### -# Start root user section -####################################### - -USER root - -# User Debian packages -## Security warning : Potential user code executed as root (build time) -RUN --mount=target=/root/packages.txt,source=packages.txt \ - apt-get update && \ - xargs -r -a /root/packages.txt apt-get install -y --no-install-recommends \ - && rm -rf /var/lib/apt/lists/* - -RUN --mount=target=/root/on_startup.sh,source=on_startup.sh,readwrite \ - bash /root/on_startup.sh - -####################################### -# End root user section -####################################### - -USER user - -# Python packages -RUN pip install --no-cache-dir torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu117 -RUN --mount=target=requirements.txt,source=requirements.txt \ - pip install --no-cache-dir --upgrade -r requirements.txt - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . 
$HOME/app - -RUN chmod +x start_server.sh - -COPY --chown=user login.html /home/user/miniconda/lib/python3.9/site-packages/jupyter_server/templates/login.html - -ENV PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - SYSTEM=spaces \ - SHELL=/bin/bash - -CMD ["./start_server.sh"] diff --git a/spaces/naver/SuperFeatures/how/stages/train.py b/spaces/naver/SuperFeatures/how/stages/train.py deleted file mode 100644 index 44638738197814baefda2610acfeb97fa233567a..0000000000000000000000000000000000000000 --- a/spaces/naver/SuperFeatures/how/stages/train.py +++ /dev/null @@ -1,241 +0,0 @@ -"""Implements training new models""" - -import time -import copy -from collections import defaultdict -import numpy as np -import torch -import torchvision.transforms as transforms - -from cirtorch.layers.loss import ContrastiveLoss -from cirtorch.datasets.datahelpers import collate_tuples -from cirtorch.datasets.traindataset import TuplesDataset -from cirtorch.datasets.genericdataset import ImagesFromList - -from ..networks import how_net -from ..utils import data_helpers, io_helpers, logging, plots -from . import evaluate - - -def train(demo_train, training, validation, model, globals): - """Demo training a network - - :param dict demo_train: Demo-related options - :param dict training: Training options - :param dict validation: Validation options - :param dict model: Model options - :param dict globals: Global options - """ - logger = globals["logger"] - (globals["exp_path"] / "epochs").mkdir(exist_ok=True) - if (globals["exp_path"] / f"epochs/model_epoch{training['epochs']}.pth").exists(): - logger.info("Skipping network training, already trained") - return - - # Global setup - set_seed(0) - globals["device"] = torch.device("cpu") - if demo_train['gpu_id'] is not None: - globals["device"] = torch.device(("cuda:%s" % demo_train['gpu_id'])) - - # Initialize network - net = how_net.init_network(**model).to(globals["device"]) - globals["transform"] = transforms.Compose([transforms.ToTensor(), \ - transforms.Normalize(**dict(zip(["mean", "std"], net.runtime['mean_std'])))]) - with logging.LoggingStopwatch("initializing network whitening", logger.info, logger.debug): - initialize_dim_reduction(net, globals, **training['initialize_dim_reduction']) - - # Initialize training - optimizer, scheduler, criterion, train_loader = \ - initialize_training(net.parameter_groups(training["optimizer"]), training, globals) - validation = Validation(validation, globals) - - for epoch in range(training['epochs']): - epoch1 = epoch + 1 - set_seed(epoch1) - - time0 = time.time() - train_loss = train_epoch(train_loader, net, globals, criterion, optimizer, epoch1) - - validation.add_train_loss(train_loss, epoch1) - validation.validate(net, epoch1) - - scheduler.step() - - io_helpers.save_checkpoint({ - 'epoch': epoch1, 'meta': net.meta, 'state_dict': net.state_dict(), - 'optimizer' : optimizer.state_dict(), 'best_score': validation.best_score[1], - 'scores': validation.scores, 'net_params': model, '_version': 'how/2020', - }, validation.best_score[0] == epoch1, epoch1 == training['epochs'], globals["exp_path"] / "epochs") - - logger.info(f"Epoch {epoch1} finished in {time.time() - time0:.1f}s") - - -def train_epoch(train_loader, net, globals, criterion, optimizer, epoch1): - """Train for one epoch""" - logger = globals['logger'] - batch_time = data_helpers.AverageMeter() - data_time = data_helpers.AverageMeter() - losses = data_helpers.AverageMeter() 
- - # Prepare epoch - train_loader.dataset.create_epoch_tuples(net) - net.train() - - end = time.time() - for i, (input, target) in enumerate(train_loader): - data_time.update(time.time() - end) - optimizer.zero_grad() - - num_images = len(input[0]) # number of images per tuple - for inp, trg in zip(input, target): - output = torch.zeros(net.meta['outputdim'], num_images).to(globals["device"]) - for imi in range(num_images): - output[:, imi] = net(inp[imi].to(globals["device"])).squeeze() - loss = criterion(output, trg.to(globals["device"])) - loss.backward() - losses.update(loss.item()) - - optimizer.step() - batch_time.update(time.time() - end) - end = time.time() - - if (i+1) % 20 == 0 or i == 0 or (i+1) == len(train_loader): - logger.info(f'>> Train: [{epoch1}][{i+1}/{len(train_loader)}]\t' \ - f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \ - f'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' \ - f'Loss {losses.val:.4f} ({losses.avg:.4f})') - - return losses.avg - - -def set_seed(seed): - """Sets given seed globally in used libraries""" - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(seed) - np.random.seed(seed) - - -def initialize_training(net_parameters, training, globals): - """Initialize classes necessary for training""" - # Need to check for keys because of defaults - assert training['optimizer'].keys() == {"lr", "weight_decay"} - assert training['lr_scheduler'].keys() == {"gamma"} - assert training['loss'].keys() == {"margin"} - assert training['dataset'].keys() == {"name", "mode", "imsize", "nnum", "qsize", "poolsize"} - assert training['loader'].keys() == {"batch_size"} - - optimizer = torch.optim.Adam(net_parameters, **training["optimizer"]) - scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, **training["lr_scheduler"]) - criterion = ContrastiveLoss(**training["loss"]).to(globals["device"]) - train_dataset = TuplesDataset(**training['dataset'], transform=globals["transform"]) - train_loader = torch.utils.data.DataLoader(train_dataset, **training['loader'], \ - pin_memory=True, drop_last=True, shuffle=True, collate_fn=collate_tuples, \ - num_workers=how_net.NUM_WORKERS) - return optimizer, scheduler, criterion, train_loader - - - -def extract_train_descriptors(net, globals, *, images, features_num): - """Extract descriptors for a given number of images from the train set""" - if features_num is None: - features_num = net.runtime['features_num'] - - images = data_helpers.load_dataset('train', data_root=globals['root_path'])[0][:images] - dataset = ImagesFromList(root='', images=images, imsize=net.runtime['image_size'], bbxs=None, - transform=globals["transform"]) - des_train = how_net.extract_vectors_local(net, dataset, globals["device"], - scales=net.runtime['training_scales'], - features_num=features_num)[0] - return des_train - - -def initialize_dim_reduction(net, globals, **kwargs): - """Initialize dimensionality reduction by PCA whitening from 'images' number of descriptors""" - if not net.dim_reduction: - return - - print(">> Initializing dim reduction") - des_train = extract_train_descriptors(net.copy_excluding_dim_reduction(), globals, **kwargs) - net.dim_reduction.initialize_pca_whitening(des_train) - - -class Validation: - """A convenient interface to validation, keeping historical values and plotting continuously - - :param dict validations: Options for each validation type (e.g. 
local_descriptor) - :param dict globals: Global options - """ - - methods = { - "global_descriptor": evaluate.eval_global, - "local_descriptor": evaluate.eval_asmk, - } - - def __init__(self, validations, globals): - validations = copy.deepcopy(validations) - self.frequencies = {x: y.pop("frequency") for x, y in validations.items()} - self.validations = validations - self.globals = globals - self.scores = {x: defaultdict(list) for x in validations} - self.scores["train_loss"] = [] - - def add_train_loss(self, loss, epoch): - """Store training loss for given epoch""" - self.scores['train_loss'].append((epoch, loss)) - - fig = plots.EpochFigure("train set", ylabel="loss") - fig.plot(*list(zip(*self.scores["train_loss"])), 'o-', label='train') - fig.save(self.globals['exp_path'] / "fig_train.jpg") - - def validate(self, net, epoch): - """Perform validation of the network and store the resulting score for given epoch""" - for name, frequency in self.frequencies.items(): - if frequency and epoch % frequency == 0: - scores = self.methods[name](net, net.runtime, self.globals, **self.validations[name]) - for dataset, values in scores.items(): - value = values['map_medium'] if "map_medium" in values else values['map'] - self.scores[name][dataset].append((epoch, value)) - - if "val_eccv20" in scores: - fig = plots.EpochFigure(f"val set - {name}", ylabel="mAP") - fig.plot(*list(zip(*self.scores[name]['val_eccv20'])), 'o-', label='val') - fig.save(self.globals['exp_path'] / f"fig_val_{name}.jpg") - - if scores.keys() - {"val_eccv20"}: - fig = plots.EpochFigure(f"test set - {name}", ylabel="mAP") - for dataset, value in self.scores[name].items(): - if dataset != "val_eccv20": - fig.plot(*list(zip(*value)), 'o-', label=dataset) - fig.save(self.globals['exp_path'] / f"fig_test_{name}.jpg") - - @property - def decisive_scores(self): - """List of pairs (epoch, score) where score is decisive for comparing epochs""" - for name in ["local_descriptor", "global_descriptor"]: - if self.frequencies[name] and "val_eccv20" in self.scores[name]: - return self.scores[name]['val_eccv20'] - return self.scores["train_loss"] - - @property - def last_epoch(self): - """Tuple (last epoch, last score) or (None, None) before decisive score is computed""" - decisive_scores = self.decisive_scores - if not decisive_scores: - return None, None - - return decisive_scores[-1] - - @property - def best_score(self): - """Tuple (best epoch, best score) or (None, None) before decisive score is computed""" - decisive_scores = self.decisive_scores - if not decisive_scores: - return None, None - - aggr = min - for name in ["local_descriptor", "global_descriptor"]: - if self.frequencies[name] and "val_eccv20" in self.scores[name]: - aggr = max - return aggr(decisive_scores, key=lambda x: x[1]) diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CCleaner 5.59.7230 Cracked.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CCleaner 5.59.7230 Cracked.md deleted file mode 100644 index 216857000d50e1fcd278655600e4586565c36e98..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CCleaner 5.59.7230 Cracked.md +++ /dev/null @@ -1,28 +0,0 @@ - -

      How to Download and Install CCleaner 5.59.7230 Cracked for Free

      -

      CCleaner is a popular tool for cleaning and optimizing your Windows PC. It can remove unnecessary files, cookies, cache, and history from your browser, system, and applications. It can also fix registry errors, manage startup programs, and uninstall unwanted software. CCleaner can make your PC faster, more secure, and more private.

      -

      However, CCleaner is not a free software. You need to pay for a license to unlock its full features and benefits. If you don't want to spend money on CCleaner, you might be tempted to download a cracked version from the internet. A cracked version is a modified version of the software that bypasses the activation process and allows you to use it without paying.

      -

      CCleaner 5.59.7230 Cracked


      DOWNLOAD >>> https://urlcod.com/2uIctj



      -

      But is it safe to download and install CCleaner 5.59.7230 cracked? What are the risks and consequences of using a cracked version of CCleaner? In this article, we will answer these questions and show you how to download and install CCleaner 5.59.7230 cracked for free.

      -

      Is it safe to download and install CCleaner 5.59.7230 cracked?

      -

      The short answer is no. Downloading and installing CCleaner 5.59.7230 cracked is not safe for several reasons:

      -
        -
      • It is illegal. Cracking software is a form of piracy, which violates the intellectual property rights of the software developer. Piracy is a crime that can result in fines or imprisonment in some countries.
      • -
      • It is risky. Cracked software often contains malware, viruses, or spyware that can harm your PC or steal your personal information. You might end up with a corrupted system, a compromised security, or a stolen identity.
      • -
      • It is unreliable. Cracked software often has bugs, errors, or missing features that can affect its performance and functionality. You might experience crashes, freezes, or glitches that can ruin your user experience.
      • -
      • It is unethical. Cracking software deprives the software developer of their rightful income and discourages them from creating more quality products. You might miss out on updates, support, or new features that the developer offers to their legitimate customers.
      • -
      -

      Therefore, we do not recommend downloading and installing CCleaner 5.59.7230 cracked for free. It is better to use the official version of CCleaner from its website[^2^] or buy a license from its online store[^2^]. You will get a safe, legal, reliable, and ethical software that can help you optimize your PC.

      -

      How to download and install CCleaner 5.59.7230 cracked for free?

      -

      If you still want to download and install CCleaner 5.59.7230 cracked for free, despite the risks and consequences, here are the steps you need to follow:

      -
        -
      1. Go to a website that offers CCleaner 5.59.7230 cracked for free download[^3^]. Be careful not to click on any ads or pop-ups that might redirect you to malicious sites or download unwanted programs.
      2. -
      3. Click on the download button or link and wait for the file to be downloaded on your PC.
      4. -
      5. Extract the file using a program like WinRAR or 7-Zip.
      6. -
      7. Run the setup file and follow the instructions on the screen to install CCleaner 5.59.7230 cracked on your PC.
      8. -
      9. Enjoy using CCleaner 5.59.7230 cracked for free.
      10. -
      -

      Note: This article is for educational purposes only. We do not condone or encourage piracy or cracking software in any way.

      -

      7b8c122e87
      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CorelDRAW Graphics Suite 2019 V21.3.0.755 Crack TOP Download HERE !.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CorelDRAW Graphics Suite 2019 V21.3.0.755 Crack TOP Download HERE !.md deleted file mode 100644 index 7539c13e07b43403d10016465b294f729a9fb5e2..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CorelDRAW Graphics Suite 2019 V21.3.0.755 Crack TOP Download HERE !.md +++ /dev/null @@ -1,42 +0,0 @@ - -

      CorelDRAW Graphics Suite 2019 v21.3.0.755 Crack Download HERE !

      -

      If you are looking for a powerful and versatile graphic design software that can help you create stunning logos, illustrations, layouts, web graphics, and more, you may have heard of CorelDRAW Graphics Suite 2019. This is a comprehensive suite of applications that includes CorelDRAW, Photo-Paint, AfterShot, PowerTRACE, FontManager, and more. With this suite, you can enjoy professional tools, rich content, intuitive interface, and seamless integration with other programs.

      -

      But what if you don't want to pay for this software or you can't afford it? You may be tempted to download and install a cracked version of it from some shady websites that claim to offer it for free or at a very low price. But before you do that, you should know what a crack is, why it is bad, and what are the alternatives to it.

      -

      CorelDRAW Graphics Suite 2019 v21.3.0.755 Crack Download HERE !


Download Zip ⇒⇒⇒ https://urlcod.com/2uIa0D



      -

      What is CorelDRAW Graphics Suite 2019 crack?

      -

      A crack is a modified version of a software that has been hacked by pirates to bypass its security system and allow anyone to use it without paying for it. A crack usually involves breaking the registration and copy-protection techniques that are used by the software developer to prevent unauthorized use.

      -

      Why do people use CorelDRAW Graphics Suite 2019 crack?

      -

      The main reason why people use CorelDRAW Graphics Suite 2019 crack is because they want to save money and avoid paying for the official version of the software. They may also think that they can get access to all the features and

      updates that are available for the official version. They may also believe that using a crack is harmless and has no consequences.

      -

      -

      Why using CorelDRAW Graphics Suite 2019 crack is bad?

      -

      However, using CorelDRAW Graphics Suite 2019 crack is not a smart or safe decision. There are many reasons why you should avoid using cracked software, such as:

      -

      Cybersecurity threats: When you download and install a crack from an untrusted source, you are exposing your computer and your data to malware, viruses, spyware, ransomware, and other malicious programs that can harm your system, steal your information, or lock your files. You may also compromise your online security and privacy by allowing hackers to access your network, accounts, or personal details. According to a report by IDC, 33% of PC software is counterfeit and 25% of counterfeit software contains malware.

      -

      Legal problems: Using CorelDRAW Graphics Suite 2019 crack is also illegal and unethical. You are violating the intellectual property rights of the software developer and the terms and conditions of the software license agreement. You are also depriving the developer of the revenue that they deserve for their hard work and innovation. Software piracy is a serious crime that can result in fines, lawsuits, or even jail time. According to the BSA Global Software Survey, the commercial value of unlicensed software was $46.3 billion in 2018.

      -

      Productivity risks: Using CorelDRAW Graphics Suite 2019 crack can also affect your system performance and work quality. Cracked software may not work properly or have bugs, errors, or compatibility issues that can cause crashes, freezes, or data loss. You may also miss out on the latest features, updates, patches, or support that are available for the official version of the software. You may also face ethical dilemmas or credibility issues if you use cracked software for professional or academic purposes.

      -

      What are the alternatives to CorelDRAW Graphics Suite 2019 crack?

      -

      Now that you know why using CorelDRAW Graphics Suite 2019 crack is bad, you may wonder what are the alternatives to it. Fortunately, there are some options that you can consider, such as:

      -

      Official version: The best and safest way to use CorelDRAW Graphics Suite 2019 is to buy the official version from the Corel website or an authorized reseller. The official version will give you access to all the features and benefits of the software, as well as regular updates, patches, support, and tutorials. You will also avoid any legal or security risks that come with using a crack. The official version of CorelDRAW Graphics Suite 2019 costs $499 for a perpetual license or $198 per year for a subscription. You can also get a discount if you are a student or an educator.

      -

      Free or cheaper alternatives: If you can't afford or don't want to pay for the official version of CorelDRAW Graphics Suite 2019, you can also try some other programs that can help you create vector graphics and illustrations without breaking the law or compromising your security. Some of these programs are free or cheaper than CorelDRAW Graphics Suite 2019, and some of them have similar features or functionality. Here are some examples of free or cheaper alternatives to CorelDRAW Graphics Suite 2019:

      - - - - - - - -
Name | Price | Features
Inkscape | Free | An open-source vector graphics editor that supports SVG, PNG, PDF, EPS, and other formats. It has tools for drawing, editing, text, shapes, gradients, filters, clones, paths, transformations, and more.
Affinity Designer | $49.99 (one-time payment) | A professional vector graphics editor that supports PSD, AI, PDF, SVG, EPS, and other formats. It has tools for curves, shapes, pens, brushes, fills, strokes, effects, and more. It also has a pixel persona mode that allows raster editing.
Gravit Designer | Free or $49 per year (Pro version) | A cross-platform vector graphics editor that supports SVG, PDF, PNG, JPG, and other formats. It has tools for paths, shapes, text, colors, gradients, effects, symbols, and more. It also has a cloud service that allows online editing and collaboration.
Sketch | $99 per year | A vector graphics editor that supports SVG, PDF, PNG, JPG, and other formats. It has tools for shapes, paths, text, styles, symbols, libraries, plugins, and more. It also has a cloud service that allows online editing and collaboration.
GIMP | Free | An open-source raster graphics editor that supports PSD, PNG, JPG, GIF, TIFF, and other formats. It has tools for painting, drawing, editing, retouching, color correction, filters, layers, masks, and more. It can also import and export vector graphics using plugins.
      -

      Conclusion

      -

      In conclusion, CorelDRAW Graphics Suite 2019 is a powerful and versatile graphic design software that can help you create stunning logos, illustrations, layouts, web graphics, and more. However, using a cracked version of it is not a good idea. You may face cybersecurity threats, legal problems, productivity risks, and other issues if you use CorelDRAW Graphics Suite 2019 crack from untrusted sources. Instead of using a pirated version of the program, you should use the official version or one of the alternatives that are available for free or at a lower cost.

      -

      FAQs

      -

      Q1: How much does CorelDRAW Graphics Suite 2019 cost?

      -

      A1: The official version of CorelDRAW Graphics Suite 2019 costs $499 for a perpetual license or $198 per year for a subscription. You can also get a discount if you are a student or an educator.

      -

      Q2: How can I get a free trial of CorelDRAW Graphics Suite 2019?

      -

      A2: You can get a free trial of CorelDRAW Graphics Suite 2019 by visiting the Corel website and clicking on the "Free Trial" button. You will need to provide your name and email address to download the trial version. The trial version will last for 15 days and will have all the features of the full version.

      -

      Q3: How can I update CorelDRAW Graphics Suite 2019 to the latest version?

      -

      A3: You can update CorelDRAW Graphics Suite 2019 to the latest version by opening the program and clicking on the "Help" menu. Then select "Check for Updates" and follow the instructions to download and install the updates. You can also visit the Corel website and download the updates manually.

      -

      Q4: How can I learn more about CorelDRAW Graphics Suite 2019 features and tutorials?

      -

      A4: You can learn more about CorelDRAW Graphics Suite 2019 features and tutorials by visiting the Corel website and clicking on the "Learn" menu. There you will find articles, videos, webinars, tips, tricks, and more that can help you learn how to use CorelDRAW Graphics Suite 2019 effectively and creatively. You can also join the Corel community and interact with other users and experts.

      -

      Q5: How can I contact Corel support if I have any questions or issues with CorelDRAW Graphics Suite 2019?

      -

      A5: You can contact Corel support if you have any questions or issues with CorelDRAW Graphics Suite 2019 by visiting the Corel website and clicking on the "Support" menu. There you will find options to chat with an agent, submit a ticket, call a phone number, or browse the knowledge base. You can also check the status of your order, register your product, or request a refund.

      b2dd77e56b
      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download-Bde-Delphi-Windows-7-NEW.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download-Bde-Delphi-Windows-7-NEW.md deleted file mode 100644 index 87cc81f41b9321d7960366ced6b5a5ae90392f15..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download-Bde-Delphi-Windows-7-NEW.md +++ /dev/null @@ -1,79 +0,0 @@ -## Download Bde Delphi Windows 7 - - - -**LINK … [https://jinyurl.com/2tx26t](https://jinyurl.com/2tx26t)** - - - -# How to Download and Install BDE for Delphi on Windows 7 - - - -The Borland Database Engine (BDE) is a legacy database access component that was used by many Delphi applications in the past. However, the BDE has not been updated for a long time and is not compatible with modern versions of Windows. If you need to run an old Delphi application that uses the BDE on Windows 7, you have a few options: - - - -1. Download the external BDE installer from your “My registered user downloads” section on Embarcadero's website[^1^] and complete the installation process. This option requires a valid Delphi license and registration. - -2. Download a special adapted BDE version called BDE eXpress[^2^] that works under Windows 10, Windows 8, Windows 7 and Vista. This option requires a purchase of the BDE eXpress software. - -3. Download the original BDE 5.01 installer from a third-party website[^3^] and install it manually. This option is not recommended as it may contain viruses or malware. - - - -After installing the BDE, you may need to adjust some settings in the BDE Administrator tool to make it work properly with your Delphi application. For example, you may need to change the NET DIR parameter to point to a shared network folder where the Paradox or dBase tables are located. - - - -Alternatively, you may consider migrating your Delphi application to use a more modern database access component such as FireDAC or ADO.NET that do not rely on the BDE. - - - -Here are some more paragraphs for your article: - - - -## How to Use FireDAC in Delphi - - - -FireDAC is a powerful data access component that supports multiple database platforms and features. It can replace the BDE in most Delphi applications and provide better performance, security and functionality. To use FireDAC in Delphi, you need to follow these steps: - - - -1. Add the FireDAC components to your project by using the Project Manager or the Component Palette. - -2. Configure the connection parameters for your database using the FDConnection component or the FireDAC Explorer tool. - -3. Use the FDQuery, FDTable or FDStoredProc components to execute SQL statements or stored procedures on your database. - -4. Use the FDDataSet, FDMemTable or FDBatchMove components to manipulate data in memory or transfer data between different sources. - -5. Use the FDGUIx and FDPhys drivers to enable user interface and database-specific features such as dialogs, login prompts, encryption, etc. - - - -## How to Write an Article for a Keyword - - - -Writing an article for a keyword is a common task for web content writers and SEO specialists. The goal is to create a relevant, informative and engaging article that matches the search intent of the keyword and ranks well on search engines. To write an article for a keyword, you need to follow these steps: - - - -1. Research the keyword and its related topics using tools such as Google Trends, Keyword Planner or AnswerThePublic. - -2. 
Analyze the competition and see what kind of articles are already ranking for the keyword. Look for gaps, opportunities and best practices. - -3. Create an outline for your article based on the keyword and its subtopics. Include a catchy title, an introduction, a main body and a conclusion. - -4. Write your article using clear, concise and grammatically correct language. Use headings, bullet points, images and other formatting elements to make it easy to read and scan. - -5. Optimize your article for SEO by using the keyword and its variations in strategic places such as the title, URL, meta tags, headings and body text. Avoid keyword stuffing and use synonyms and related terms instead. - -6. Edit and proofread your article for spelling, punctuation and factual errors. Use tools such as Grammarly or Hemingway to improve your writing quality. - - - - 1b8d091108 \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Guththila Kavya Sinhala Pdf 14.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Guththila Kavya Sinhala Pdf 14.md deleted file mode 100644 index 1d50a090f4d44510f68200e76c7a9dff8415bbe3..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Guththila Kavya Sinhala Pdf 14.md +++ /dev/null @@ -1,17 +0,0 @@ -
      -

      Guththila Kavya: A Sinhala Poem Based on a Jataka Story

      -

      Guththila Kavya is a Sinhala poem written by an unknown author in the 14th century. It is based on the Guththila Jataka, one of the 550 stories of the previous lives of the Buddha. The poem narrates the story of Guththila, a musician who renounces his worldly life and becomes a monk, and his friend Musila, who tries to tempt him back to the lay life.

      -

      Guththila Kavya Sinhala Pdf 14


DOWNLOAD ····· https://urlcod.com/2uIauF



      -

      The poem consists of 14 chapters and 1039 verses. It is considered one of the oldest and most important works of Sinhala literature. It showcases the poetic skills, religious knowledge, and cultural values of the medieval Sinhala society. It also reflects the influence of Sanskrit and Pali languages and literature on Sinhala poetry.

      -

      Guththila Kavya is a popular text among Sinhala students and scholars. It is included in the syllabus of the GCE Ordinary Level and Advanced Level examinations in Sri Lanka. It is also studied as a source of historical, linguistic, and literary information. There are several editions and translations of the poem available in print and online formats.

      -

      One of the online sources that provides a pdf version of Guththila Kavya is the Digital Repository of the University of Sri Jayewardenepura. The pdf file contains the text of the poem in Sinhala script, along with an introduction and notes by Professor Wimal G. Balagalle. The file can be accessed from this link: http://dr.lib.sjp.ac.lk/handle/123456789/2452

      -

      - -

      Guththila Kavya is not only a literary masterpiece, but also a cultural treasure. It reveals the rich musical heritage of ancient Sri Lanka, as it describes the instruments, melodies, rhythms, and styles of the two musicians. It also depicts the social and political conditions of the Kotte Kingdom, such as the court intrigues, the royal patronage, the religious ceremonies, and the public festivals. It also illustrates the moral and spiritual values of Buddhism, as it portrays the virtues of humility, gratitude, renunciation, and compassion.

      -

      Guththila Kavya has inspired many creative works in different media and genres. It has been adapted into dramas, operas, films, and songs by various artists and performers. One of the recent adaptations is a musical theatre production by Akhila Sapumal, who has given a fresh and innovative interpretation of the poem. Sapumal's Guththila Kavya combines traditional and modern elements of music, dance, costume, and stagecraft to create a captivating spectacle that appeals to a contemporary audience.

      -

      Guththila Kavya is a testament to the power and beauty of Sinhala poetry. It is a work that deserves to be read, studied, and appreciated by all who love language and literature. It is a work that celebrates the human spirit and its quest for excellence and enlightenment.

      - -

      Guththila Kavya is also a remarkable example of intertextuality and adaptation. It draws from various sources of Buddhist and Sinhala literature, such as the Pali Jataka stories, the Sinhala Thupavamsa, the Sinhala Sandesa poems, and the Sinhala Saddharma Ratnavaliya. It also incorporates elements of Indian musicology, such as the concepts of raga, tala, and alapana. It transforms and reinterprets these sources to create a unique and original work of art that reflects the author's personal vision and expression.

      -

      Guththila Kavya has had a lasting impact on the Sinhala literary and cultural tradition. It has been praised and studied by many scholars and critics as a masterpiece of Sinhala poetry. It has also influenced many writers and artists who have followed in its footsteps. It has been recognized as a national treasure and a source of pride and inspiration for the Sinhala people. It has also contributed to the preservation and promotion of the Sinhala language and literature in Sri Lanka and abroad.

      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Public Domain Metal Music.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Public Domain Metal Music.md deleted file mode 100644 index 3ff4500c68345721838c67e1bd72ca78fd8a62d5..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Public Domain Metal Music.md +++ /dev/null @@ -1,22 +0,0 @@ -
      -

      Public Domain Metal Music: A Guide for Rockers

      -

      Metal music is a genre of rock music characterized by heavy distortion, aggressive rhythms, and powerful vocals. Metal music has many subgenres, such as black metal, death metal, thrash metal, power metal, and more. Metal music is often associated with rebellion, anti-establishment attitudes, and counterculture.

      -

      public domain metal music


      Download Zip: https://urlcod.com/2uIcqw



      -

      But what if you want to use metal music in your own projects, such as videos, podcasts, games, or websites? Do you need to pay royalties or get permission from the artists? Not necessarily. There are some sources of metal music that are in the public domain or licensed under Creative Commons, which means you can use them for free and legally.

      -

      Public domain music is music that has no copyright protection or whose copyright has expired. This means anyone can use it for any purpose without paying royalties or getting permission. Creative Commons music is music that the artists have licensed under certain conditions, such as attribution, non-commercial use, or share-alike. This means you can use it for free as long as you follow the terms of the license.

      -

      Here are some websites where you can find public domain or Creative Commons metal music:

      -
        -
      • Free Music Archive: This is a library of high-quality and legal audio downloads. You can browse by genre, mood, instrument, license, and more. You can find over 1,000 metal tracks here, ranging from black metal to power pop. Some of them are in the public domain and some are licensed under Creative Commons. Make sure to check the license of each track before using it.
      • Serpent Sound Studios: This is a website by Alexander Nakarada, a composer and producer of various genres of music. He offers royalty free metal and rock instrumentals that you can use for any project. You just need to credit him in your description. You can download his tracks in MP3 or WAV format.
      • Proud Music Library: This is a professional online music library that offers royalty free music for various media projects. You can find over 400 metal tracks here, from heavy metal to progressive metal. Some of them are licensed under Creative Commons and some require a standard license fee. You can preview and download the tracks in MP3 format.
      -

      These are just some examples of websites where you can find public domain or Creative Commons metal music. There are many more out there if you search online. Just make sure to read the terms and conditions of each website and each track before using them.

      -

      Metal music is a powerful and expressive genre that can enhance your projects and convey your message. With public domain or Creative Commons metal music, you can rock on without breaking the law or the bank.

      Here are some tips on how to use public domain or Creative Commons metal music in your projects:

      -
        -
      1. Choose the right track for your project. Consider the mood, theme, genre, and audience of your project. For example, if you are making a horror video, you might want to use a dark and ominous metal track. If you are making a comedy podcast, you might want to use a light and humorous metal track.
      2. Edit the track to fit your project. You can use audio editing software to trim, loop, fade, mix, or add effects to the track. You can also combine different tracks to create a custom soundtrack; for example, you can layer a metal track with a classical track to create contrast or harmony. A short scripting sketch of this step follows below.
      3. Credit the source of the track. If you are using a public domain track, you don't need to credit anyone. But if you are using a Creative Commons track, you need to follow the terms of the license. Usually, this means you need to mention the name of the artist and the title of the track, and provide a link to the source website or the license. You can do this in your description, credits, or end screen.
      -

      By following these tips, you can make the most of public domain or Creative Commons metal music in your projects. You can create original and engaging content that will impress your audience and express your creativity.
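
      If you prefer to script the editing step rather than use a GUI editor, a library such as pydub is one convenient option. The article does not name a specific tool, so treat the snippet below as a minimal sketch: the library choice, file names, and durations are all placeholder assumptions, not recommendations from the original text.

```python
# Minimal sketch: trim, fade, and loop a track with pydub.
# Assumes `pip install pydub` plus an ffmpeg install; the file names
# and durations are placeholders, not values taken from the article.
from pydub import AudioSegment

track = AudioSegment.from_file("metal_track.mp3")

# pydub slices in milliseconds: keep the 30 seconds starting at 0:10
clip = track[10_000:40_000]

# soften the edges with a 2-second fade-in and fade-out
clip = clip.fade_in(2_000).fade_out(2_000)

# repeat the clip three times to cover a longer scene
looped = clip * 3

# export the result for use in a video or podcast
looped.export("edited_metal_track.mp3", format="mp3")
```

      The same idea extends to layering: `clip.overlay(other_clip)` mixes another segment on top of the clip, which is one way to combine a metal track with a classical one as suggested above.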

      -
      -
      \ No newline at end of file diff --git a/spaces/niizam/sovits-models/data_utils.py b/spaces/niizam/sovits-models/data_utils.py deleted file mode 100644 index 7c76fd1c3a45b8304d916161718c7763874f3e35..0000000000000000000000000000000000000000 --- a/spaces/niizam/sovits-models/data_utils.py +++ /dev/null @@ -1,155 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import modules.commons as commons -import utils -from modules.mel_processing import spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text - -# import h5py - - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths, hparams, all_in_mem: bool = False): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.spec_len = hparams.train.max_speclen - self.spk_map = hparams.spk - - random.seed(1234) - random.shuffle(self.audiopaths) - - self.all_in_mem = all_in_mem - if self.all_in_mem: - self.cache = [self.get_audio(p[0]) for p in self.audiopaths] - - def get_audio(self, filename): - filename = filename.replace("\\", "/") - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - - # Ideally, all data generated after Mar 25 should have .spec.pt - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split("/")[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - f0 = np.load(filename + ".f0.npy") - f0, uv = utils.interpolate_f0(f0) - f0 = torch.FloatTensor(f0) - uv = torch.FloatTensor(uv) - - c = torch.load(filename+ ".soft.pt") - c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[0]) - - - lmin = min(c.size(-1), spec.size(-1)) - assert abs(c.size(-1) - spec.size(-1)) < 3, (c.size(-1), spec.size(-1), f0.shape, filename) - assert abs(audio_norm.shape[1]-lmin * self.hop_length) < 3 * self.hop_length - spec, c, f0, uv = spec[:, :lmin], c[:, :lmin], f0[:lmin], uv[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - - return c, f0, spec, audio_norm, spk, uv - - def random_slice(self, c, f0, spec, audio_norm, spk, uv): - # if spec.shape[1] < 30: - # print("skip too short audio:", filename) - # return None - if spec.shape[1] > 800: - start = random.randint(0, spec.shape[1]-800) - end = start + 790 - spec, c, f0, uv = spec[:, start:end], c[:, start:end], f0[start:end], uv[start:end] - audio_norm = audio_norm[:, start * self.hop_length : end * self.hop_length] - - return c, f0, spec, audio_norm, spk, uv - - def __getitem__(self, index): - if self.all_in_mem: - return 
self.random_slice(*self.cache[index]) - else: - return self.random_slice(*self.get_audio(self.audiopaths[index][0])) - - def __len__(self): - return len(self.audiopaths) - - -class TextAudioCollate: - - def __call__(self, batch): - batch = [b for b in batch if b is not None] - - input_lengths, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[0].shape[1] for x in batch]), - dim=0, descending=True) - - max_c_len = max([x[0].size(1) for x in batch]) - max_wav_len = max([x[3].size(1) for x in batch]) - - lengths = torch.LongTensor(len(batch)) - - c_padded = torch.FloatTensor(len(batch), batch[0][0].shape[0], max_c_len) - f0_padded = torch.FloatTensor(len(batch), max_c_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][2].shape[0], max_c_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - spkids = torch.LongTensor(len(batch), 1) - uv_padded = torch.FloatTensor(len(batch), max_c_len) - - c_padded.zero_() - spec_padded.zero_() - f0_padded.zero_() - wav_padded.zero_() - uv_padded.zero_() - - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - c = row[0] - c_padded[i, :, :c.size(1)] = c - lengths[i] = c.size(1) - - f0 = row[1] - f0_padded[i, :f0.size(0)] = f0 - - spec = row[2] - spec_padded[i, :, :spec.size(1)] = spec - - wav = row[3] - wav_padded[i, :, :wav.size(1)] = wav - - spkids[i, 0] = row[4] - - uv = row[5] - uv_padded[i, :uv.size(0)] = uv - - return c_padded, f0_padded, spec_padded, wav_padded, spkids, lengths, uv_padded diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/converters/builtin.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/converters/builtin.py deleted file mode 100644 index 3bd48f8f7afc49cf38bf410f01bc673d446f37d7..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/converters/builtin.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from ..structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput -from . 
import ( - HFlipConverter, - ToChartResultConverter, - ToChartResultConverterWithConfidences, - ToMaskConverter, - densepose_chart_predictor_output_hflip, - densepose_chart_predictor_output_to_result, - densepose_chart_predictor_output_to_result_with_confidences, - predictor_output_with_coarse_segm_to_mask, - predictor_output_with_fine_and_coarse_segm_to_mask, -) - -ToMaskConverter.register( - DensePoseChartPredictorOutput, predictor_output_with_fine_and_coarse_segm_to_mask -) -ToMaskConverter.register( - DensePoseEmbeddingPredictorOutput, predictor_output_with_coarse_segm_to_mask -) - -ToChartResultConverter.register( - DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result -) - -ToChartResultConverterWithConfidences.register( - DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result_with_confidences -) - -HFlipConverter.register(DensePoseChartPredictorOutput, densepose_chart_predictor_output_hflip) diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/modeling/losses/utils.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/modeling/losses/utils.py deleted file mode 100644 index ceea981d11650af80cb007fe129a3ee4864fc48f..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/modeling/losses/utils.py +++ /dev/null @@ -1,443 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from abc import ABC, abstractmethod -from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Tuple -import torch -from torch.nn import functional as F - -from detectron2.structures import BoxMode, Instances - -from densepose import DensePoseDataRelative - -LossDict = Dict[str, torch.Tensor] - - -def _linear_interpolation_utilities(v_norm, v0_src, size_src, v0_dst, size_dst, size_z): - """ - Computes utility values for linear interpolation at points v. - The points are given as normalized offsets in the source interval - (v0_src, v0_src + size_src), more precisely: - v = v0_src + v_norm * size_src / 256.0 - The computed utilities include lower points v_lo, upper points v_hi, - interpolation weights v_w and flags j_valid indicating whether the - points falls into the destination interval (v0_dst, v0_dst + size_dst). 
- - Args: - v_norm (:obj: `torch.Tensor`): tensor of size N containing - normalized point offsets - v0_src (:obj: `torch.Tensor`): tensor of size N containing - left bounds of source intervals for normalized points - size_src (:obj: `torch.Tensor`): tensor of size N containing - source interval sizes for normalized points - v0_dst (:obj: `torch.Tensor`): tensor of size N containing - left bounds of destination intervals - size_dst (:obj: `torch.Tensor`): tensor of size N containing - destination interval sizes - size_z (int): interval size for data to be interpolated - - Returns: - v_lo (:obj: `torch.Tensor`): int tensor of size N containing - indices of lower values used for interpolation, all values are - integers from [0, size_z - 1] - v_hi (:obj: `torch.Tensor`): int tensor of size N containing - indices of upper values used for interpolation, all values are - integers from [0, size_z - 1] - v_w (:obj: `torch.Tensor`): float tensor of size N containing - interpolation weights - j_valid (:obj: `torch.Tensor`): uint8 tensor of size N containing - 0 for points outside the estimation interval - (v0_est, v0_est + size_est) and 1 otherwise - """ - v = v0_src + v_norm * size_src / 256.0 - j_valid = (v - v0_dst >= 0) * (v - v0_dst < size_dst) - v_grid = (v - v0_dst) * size_z / size_dst - v_lo = v_grid.floor().long().clamp(min=0, max=size_z - 1) - v_hi = (v_lo + 1).clamp(max=size_z - 1) - v_grid = torch.min(v_hi.float(), v_grid) - v_w = v_grid - v_lo.float() - return v_lo, v_hi, v_w, j_valid - - -class BilinearInterpolationHelper: - """ - Args: - packed_annotations: object that contains packed annotations - j_valid (:obj: `torch.Tensor`): uint8 tensor of size M containing - 0 for points to be discarded and 1 for points to be selected - y_lo (:obj: `torch.Tensor`): int tensor of indices of upper values - in z_est for each point - y_hi (:obj: `torch.Tensor`): int tensor of indices of lower values - in z_est for each point - x_lo (:obj: `torch.Tensor`): int tensor of indices of left values - in z_est for each point - x_hi (:obj: `torch.Tensor`): int tensor of indices of right values - in z_est for each point - w_ylo_xlo (:obj: `torch.Tensor`): float tensor of size M; - contains upper-left value weight for each point - w_ylo_xhi (:obj: `torch.Tensor`): float tensor of size M; - contains upper-right value weight for each point - w_yhi_xlo (:obj: `torch.Tensor`): float tensor of size M; - contains lower-left value weight for each point - w_yhi_xhi (:obj: `torch.Tensor`): float tensor of size M; - contains lower-right value weight for each point - """ - - def __init__( - self, - packed_annotations: Any, - j_valid: torch.Tensor, - y_lo: torch.Tensor, - y_hi: torch.Tensor, - x_lo: torch.Tensor, - x_hi: torch.Tensor, - w_ylo_xlo: torch.Tensor, - w_ylo_xhi: torch.Tensor, - w_yhi_xlo: torch.Tensor, - w_yhi_xhi: torch.Tensor, - ): - for k, v in locals().items(): - if k != "self": - setattr(self, k, v) - - @staticmethod - def from_matches( - packed_annotations: Any, densepose_outputs_size_hw: Tuple[int, int] - ) -> "BilinearInterpolationHelper": - """ - Args: - packed_annotations: annotations packed into tensors, the following - attributes are required: - - bbox_xywh_gt - - bbox_xywh_est - - x_gt - - y_gt - - point_bbox_with_dp_indices - - point_bbox_indices - densepose_outputs_size_hw (tuple [int, int]): resolution of - DensePose predictor outputs (H, W) - Return: - An instance of `BilinearInterpolationHelper` used to perform - interpolation for the given annotation points and output resolution - """ - - 
zh, zw = densepose_outputs_size_hw - x0_gt, y0_gt, w_gt, h_gt = packed_annotations.bbox_xywh_gt[ - packed_annotations.point_bbox_with_dp_indices - ].unbind(dim=1) - x0_est, y0_est, w_est, h_est = packed_annotations.bbox_xywh_est[ - packed_annotations.point_bbox_with_dp_indices - ].unbind(dim=1) - x_lo, x_hi, x_w, jx_valid = _linear_interpolation_utilities( - packed_annotations.x_gt, x0_gt, w_gt, x0_est, w_est, zw - ) - y_lo, y_hi, y_w, jy_valid = _linear_interpolation_utilities( - packed_annotations.y_gt, y0_gt, h_gt, y0_est, h_est, zh - ) - j_valid = jx_valid * jy_valid - - w_ylo_xlo = (1.0 - x_w) * (1.0 - y_w) - w_ylo_xhi = x_w * (1.0 - y_w) - w_yhi_xlo = (1.0 - x_w) * y_w - w_yhi_xhi = x_w * y_w - - return BilinearInterpolationHelper( - packed_annotations, - j_valid, - y_lo, - y_hi, - x_lo, - x_hi, - w_ylo_xlo, # pyre-ignore[6] - w_ylo_xhi, - # pyre-fixme[6]: Expected `Tensor` for 9th param but got `float`. - w_yhi_xlo, - w_yhi_xhi, - ) - - def extract_at_points( - self, - z_est, - slice_fine_segm=None, - w_ylo_xlo=None, - w_ylo_xhi=None, - w_yhi_xlo=None, - w_yhi_xhi=None, - ): - """ - Extract ground truth values z_gt for valid point indices and estimated - values z_est using bilinear interpolation over top-left (y_lo, x_lo), - top-right (y_lo, x_hi), bottom-left (y_hi, x_lo) and bottom-right - (y_hi, x_hi) values in z_est with corresponding weights: - w_ylo_xlo, w_ylo_xhi, w_yhi_xlo and w_yhi_xhi. - Use slice_fine_segm to slice dim=1 in z_est - """ - slice_fine_segm = ( - self.packed_annotations.fine_segm_labels_gt - if slice_fine_segm is None - else slice_fine_segm - ) - w_ylo_xlo = self.w_ylo_xlo if w_ylo_xlo is None else w_ylo_xlo - w_ylo_xhi = self.w_ylo_xhi if w_ylo_xhi is None else w_ylo_xhi - w_yhi_xlo = self.w_yhi_xlo if w_yhi_xlo is None else w_yhi_xlo - w_yhi_xhi = self.w_yhi_xhi if w_yhi_xhi is None else w_yhi_xhi - - index_bbox = self.packed_annotations.point_bbox_indices - z_est_sampled = ( - z_est[index_bbox, slice_fine_segm, self.y_lo, self.x_lo] * w_ylo_xlo - + z_est[index_bbox, slice_fine_segm, self.y_lo, self.x_hi] * w_ylo_xhi - + z_est[index_bbox, slice_fine_segm, self.y_hi, self.x_lo] * w_yhi_xlo - + z_est[index_bbox, slice_fine_segm, self.y_hi, self.x_hi] * w_yhi_xhi - ) - return z_est_sampled - - -def resample_data( - z, bbox_xywh_src, bbox_xywh_dst, wout, hout, mode: str = "nearest", padding_mode: str = "zeros" -): - """ - Args: - z (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with data to be - resampled - bbox_xywh_src (:obj: `torch.Tensor`): tensor of size (N,4) containing - source bounding boxes in format XYWH - bbox_xywh_dst (:obj: `torch.Tensor`): tensor of size (N,4) containing - destination bounding boxes in format XYWH - Return: - zresampled (:obj: `torch.Tensor`): tensor of size (N, C, Hout, Wout) - with resampled values of z, where D is the discretization size - """ - n = bbox_xywh_src.size(0) - assert n == bbox_xywh_dst.size(0), ( - "The number of " - "source ROIs for resampling ({}) should be equal to the number " - "of destination ROIs ({})".format(bbox_xywh_src.size(0), bbox_xywh_dst.size(0)) - ) - x0src, y0src, wsrc, hsrc = bbox_xywh_src.unbind(dim=1) - x0dst, y0dst, wdst, hdst = bbox_xywh_dst.unbind(dim=1) - x0dst_norm = 2 * (x0dst - x0src) / wsrc - 1 - y0dst_norm = 2 * (y0dst - y0src) / hsrc - 1 - x1dst_norm = 2 * (x0dst + wdst - x0src) / wsrc - 1 - y1dst_norm = 2 * (y0dst + hdst - y0src) / hsrc - 1 - grid_w = torch.arange(wout, device=z.device, dtype=torch.float) / wout - grid_h = torch.arange(hout, device=z.device, dtype=torch.float) / 
hout - grid_w_expanded = grid_w[None, None, :].expand(n, hout, wout) - grid_h_expanded = grid_h[None, :, None].expand(n, hout, wout) - dx_expanded = (x1dst_norm - x0dst_norm)[:, None, None].expand(n, hout, wout) - dy_expanded = (y1dst_norm - y0dst_norm)[:, None, None].expand(n, hout, wout) - x0_expanded = x0dst_norm[:, None, None].expand(n, hout, wout) - y0_expanded = y0dst_norm[:, None, None].expand(n, hout, wout) - grid_x = grid_w_expanded * dx_expanded + x0_expanded - grid_y = grid_h_expanded * dy_expanded + y0_expanded - grid = torch.stack((grid_x, grid_y), dim=3) - # resample Z from (N, C, H, W) into (N, C, Hout, Wout) - zresampled = F.grid_sample(z, grid, mode=mode, padding_mode=padding_mode, align_corners=True) - return zresampled - - -class AnnotationsAccumulator(ABC): - """ - Abstract class for an accumulator for annotations that can produce - dense annotations packed into tensors. - """ - - @abstractmethod - def accumulate(self, instances_one_image: Instances): - """ - Accumulate instances data for one image - - Args: - instances_one_image (Instances): instances data to accumulate - """ - pass - - @abstractmethod - def pack(self) -> Any: - """ - Pack data into tensors - """ - pass - - -@dataclass -class PackedChartBasedAnnotations: - """ - Packed annotations for chart-based model training. The following attributes - are defined: - - fine_segm_labels_gt (tensor [K] of `int64`): GT fine segmentation point labels - - x_gt (tensor [K] of `float32`): GT normalized X point coordinates - - y_gt (tensor [K] of `float32`): GT normalized Y point coordinates - - u_gt (tensor [K] of `float32`): GT point U values - - v_gt (tensor [K] of `float32`): GT point V values - - coarse_segm_gt (tensor [N, S, S] of `float32`): GT segmentation for bounding boxes - - bbox_xywh_gt (tensor [N, 4] of `float32`): selected GT bounding boxes in - XYWH format - - bbox_xywh_est (tensor [N, 4] of `float32`): selected matching estimated - bounding boxes in XYWH format - - point_bbox_with_dp_indices (tensor [K] of `int64`): indices of bounding boxes - with DensePose annotations that correspond to the point data - - point_bbox_indices (tensor [K] of `int64`): indices of bounding boxes - (not necessarily the selected ones with DensePose data) that correspond - to the point data - - bbox_indices (tensor [N] of `int64`): global indices of selected bounding - boxes with DensePose annotations; these indices could be used to access - features that are computed for all bounding boxes, not only the ones with - DensePose annotations. - Here K is the total number of points and N is the total number of instances - with DensePose annotations. - """ - - fine_segm_labels_gt: torch.Tensor - x_gt: torch.Tensor - y_gt: torch.Tensor - u_gt: torch.Tensor - v_gt: torch.Tensor - coarse_segm_gt: Optional[torch.Tensor] - bbox_xywh_gt: torch.Tensor - bbox_xywh_est: torch.Tensor - point_bbox_with_dp_indices: torch.Tensor - point_bbox_indices: torch.Tensor - bbox_indices: torch.Tensor - - -class ChartBasedAnnotationsAccumulator(AnnotationsAccumulator): - """ - Accumulates annotations by batches that correspond to objects detected on - individual images. Can pack them together into single tensors. 
- """ - - def __init__(self): - self.i_gt = [] - self.x_gt = [] - self.y_gt = [] - self.u_gt = [] - self.v_gt = [] - self.s_gt = [] - self.bbox_xywh_gt = [] - self.bbox_xywh_est = [] - self.point_bbox_with_dp_indices = [] - self.point_bbox_indices = [] - self.bbox_indices = [] - self.nxt_bbox_with_dp_index = 0 - self.nxt_bbox_index = 0 - - def accumulate(self, instances_one_image: Instances): - """ - Accumulate instances data for one image - - Args: - instances_one_image (Instances): instances data to accumulate - """ - boxes_xywh_est = BoxMode.convert( - instances_one_image.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS - ) - boxes_xywh_gt = BoxMode.convert( - instances_one_image.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS - ) - n_matches = len(boxes_xywh_gt) - assert n_matches == len( - boxes_xywh_est - ), f"Got {len(boxes_xywh_est)} proposal boxes and {len(boxes_xywh_gt)} GT boxes" - if not n_matches: - # no detection - GT matches - return - if ( - not hasattr(instances_one_image, "gt_densepose") - or instances_one_image.gt_densepose is None - ): - # no densepose GT for the detections, just increase the bbox index - self.nxt_bbox_index += n_matches - return - for box_xywh_est, box_xywh_gt, dp_gt in zip( - boxes_xywh_est, boxes_xywh_gt, instances_one_image.gt_densepose - ): - if (dp_gt is not None) and (len(dp_gt.x) > 0): - # pyre-fixme[6]: For 1st argument expected `Tensor` but got `float`. - # pyre-fixme[6]: For 2nd argument expected `Tensor` but got `float`. - self._do_accumulate(box_xywh_gt, box_xywh_est, dp_gt) - self.nxt_bbox_index += 1 - - def _do_accumulate( - self, box_xywh_gt: torch.Tensor, box_xywh_est: torch.Tensor, dp_gt: DensePoseDataRelative - ): - """ - Accumulate instances data for one image, given that the data is not empty - - Args: - box_xywh_gt (tensor): GT bounding box - box_xywh_est (tensor): estimated bounding box - dp_gt (DensePoseDataRelative): GT densepose data - """ - self.i_gt.append(dp_gt.i) - self.x_gt.append(dp_gt.x) - self.y_gt.append(dp_gt.y) - self.u_gt.append(dp_gt.u) - self.v_gt.append(dp_gt.v) - if hasattr(dp_gt, "segm"): - self.s_gt.append(dp_gt.segm.unsqueeze(0)) - self.bbox_xywh_gt.append(box_xywh_gt.view(-1, 4)) - self.bbox_xywh_est.append(box_xywh_est.view(-1, 4)) - self.point_bbox_with_dp_indices.append( - torch.full_like(dp_gt.i, self.nxt_bbox_with_dp_index) - ) - self.point_bbox_indices.append(torch.full_like(dp_gt.i, self.nxt_bbox_index)) - self.bbox_indices.append(self.nxt_bbox_index) - self.nxt_bbox_with_dp_index += 1 - - def pack(self) -> Optional[PackedChartBasedAnnotations]: - """ - Pack data into tensors - """ - if not len(self.i_gt): - # TODO: - # returning proper empty annotations would require - # creating empty tensors of appropriate shape and - # type on an appropriate device; - # we return None so far to indicate empty annotations - return None - return PackedChartBasedAnnotations( - fine_segm_labels_gt=torch.cat(self.i_gt, 0).long(), - x_gt=torch.cat(self.x_gt, 0), - y_gt=torch.cat(self.y_gt, 0), - u_gt=torch.cat(self.u_gt, 0), - v_gt=torch.cat(self.v_gt, 0), - # ignore segmentation annotations, if not all the instances contain those - coarse_segm_gt=torch.cat(self.s_gt, 0) - if len(self.s_gt) == len(self.bbox_xywh_gt) - else None, - bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0), - bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0), - point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0).long(), - point_bbox_indices=torch.cat(self.point_bbox_indices, 0).long(), - 
bbox_indices=torch.as_tensor( - self.bbox_indices, dtype=torch.long, device=self.x_gt[0].device - ).long(), - ) - - -def extract_packed_annotations_from_matches( - proposals_with_targets: List[Instances], accumulator: AnnotationsAccumulator -) -> Any: - for proposals_targets_per_image in proposals_with_targets: - accumulator.accumulate(proposals_targets_per_image) - return accumulator.pack() - - -def sample_random_indices( - n_indices: int, n_samples: int, device: Optional[torch.device] = None -) -> Optional[torch.Tensor]: - """ - Samples `n_samples` random indices from range `[0..n_indices - 1]`. - If `n_indices` is smaller than `n_samples`, returns `None` meaning that all indices - are selected. - Args: - n_indices (int): total number of indices - n_samples (int): number of indices to sample - device (torch.device): the desired device of returned tensor - Return: - Tensor of selected vertex indices, or `None`, if all vertices are selected - """ - if (n_samples <= 0) or (n_indices <= n_samples): - return None - indices = torch.randperm(n_indices, device=device)[:n_samples] - return indices diff --git a/spaces/nomic-ai/Chinese-Vicuna_guanaco_belle_merge_v1.0/README.md b/spaces/nomic-ai/Chinese-Vicuna_guanaco_belle_merge_v1.0/README.md deleted file mode 100644 index fcbe4cdb1729c59f0d2f38952480f2d8317a78b0..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/Chinese-Vicuna_guanaco_belle_merge_v1.0/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Chinese-Vicuna/guanaco_belle_merge_v1.0 -emoji: 🗺️ -colorFrom: purple -colorTo: red -sdk: static -pinned: false ---- \ No newline at end of file diff --git a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/sparse_linear_layer_test.cc b/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/sparse_linear_layer_test.cc deleted file mode 100644 index bb256ec05965c3ed39b657ec43ba9a58ba415857..0000000000000000000000000000000000000000 --- a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/sparse_linear_layer_test.cc +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "sparse_matmul/layers/sparse_linear_layer.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" -#include "sparse_matmul/numerics/test_utils.h" - -namespace csrblocksparse { -namespace { - -constexpr int kBlockSize = 4; -constexpr int kSize = 256; -constexpr int kNumThreads = 4; -constexpr int kCols = 1; - -void SlicedThreadBody(SpinBarrier* spin_barrier, int tid, - const FatCacheAlignedVector& rhs, - SparseLinearLayer* sparse_linear_layer, - FatCacheAlignedVector* out, bool use_relu) { - sparse_linear_layer->MatVec(rhs, use_relu, tid, /*replicas=*/1, - /*output_stride=*/0, out); - spin_barrier->barrier(); -} - -// Tests that a Layer that has been SliceForThreads computes the same result as -// the original layer. This is a basic test that all the slicing didn't mess up -// any of the computations. 
-TEST(CsrBlockSparseMatrix, SliceForThreads) { - MaskedSparseMatrix matrix(kSize, kSize, 0.95, kBlockSize, kBlockSize); - FatCacheAlignedVector rhs(kSize, kCols); - CacheAlignedVector bias(kSize); - FatCacheAlignedVector out1(kSize, kCols); - - bias.FillRandom(); - rhs.FillRandom(); - out1.FillZero(); - FatCacheAlignedVector out_reference = out1; - CsrBlockSparseMatrix sparse_matrix(matrix); - SparseLinearLayer sparse_linear_layer(std::move(sparse_matrix), - std::move(bias)); - sparse_linear_layer.PrepareForThreads(1); - sparse_linear_layer.MatVec(rhs, /*relu=*/true, /*tid=*/0, /*replicas=*/1, - /*output_stride=*/0, &out_reference); - std::vector fake_split_points = {0, 48 / kBlockSize, 128 / kBlockSize, - 208 / kBlockSize, kSize / kBlockSize}; - sparse_linear_layer.PrepareForThreads(kNumThreads); - sparse_linear_layer.SliceForThreads(fake_split_points); - csrblocksparse::LaunchOnThreadsWithBarrier(kNumThreads, SlicedThreadBody, rhs, - &sparse_linear_layer, &out1, - /*relu=*/true); - - CheckResult(out_reference, out1, kCols); -} - -void LayersThreadBody(SpinBarrier* spin_barrier, int tid, - const FatCacheAlignedVector& rhs, - SparseLinearLayer* sparse_linear_layer1, - SparseLinearLayer* sparse_linear_layer2, - FatCacheAlignedVector* out1, - FatCacheAlignedVector* out2, bool use_relu) { - sparse_linear_layer1->MatVec(rhs, use_relu, tid, /*replicas=*/1, - /*output_stride=*/0, out1); - // NOTE no barrier here! - sparse_linear_layer2->MatVec(*out1, use_relu, tid, /*replicas=*/1, - /*output_stride=*/0, out2); - spin_barrier->barrier(); -} - -// Tests that a pair of layers computes the same result whether or not the -// second layer has been SliceForThreads. This is a more critical test that -// the replacement of barriers with producer-consumer locks works. -// Must be run with tsan to really test it properly. -TEST(CsrBlockSparseMatrix, SliceForThreadsLayers) { - MaskedSparseMatrix matrix1(kSize, kSize, 0.95, kBlockSize, kBlockSize); - FatCacheAlignedVector rhs(kSize, kCols); - CacheAlignedVector bias1(kSize); - FatCacheAlignedVector out1(kSize, kCols); - MaskedSparseMatrix matrix2(kSize, kSize, 0.95, kBlockSize, kBlockSize); - CacheAlignedVector bias2(kSize); - FatCacheAlignedVector out2(kSize, kCols); - - bias1.FillRandom(); - rhs.FillRandom(); - bias2.FillRandom(); - out1.FillZero(); - out2.FillZero(); - FatCacheAlignedVector out_reference = out2; - CsrBlockSparseMatrix sparse_matrix1(matrix1); - SparseLinearLayer layer1(std::move(sparse_matrix1), - std::move(bias1)); - CsrBlockSparseMatrix sparse_matrix2(matrix2); - SparseLinearLayer layer2(std::move(sparse_matrix2), - std::move(bias2)); - layer1.PrepareForThreads(1); - layer2.PrepareForThreads(1); - layer1.MatVec(rhs, /*relu=*/true, /*tid=*/0, /*replicas=*/1, - /*output_stride=*/0, &out1); - layer2.MatVec(out1, /*relu=*/true, /*tid=*/0, /*replicas=*/1, - /*output_stride=*/0, &out_reference); - layer1.PrepareForThreads(kNumThreads); - layer2.PrepareForThreads(kNumThreads); - layer2.SliceForThreads(layer1.split_points()); - csrblocksparse::LaunchOnThreadsWithBarrier(kNumThreads, LayersThreadBody, rhs, - &layer1, &layer2, &out1, &out2, - /*relu=*/true); - - CheckResult(out_reference, out2, kCols); -} - -// Tests that a Layer that has been DoubleBlockHeight()-ed computes the same -// result as original layer. (Float compute type). 
-TEST(CsrBlockSparseMatrix, Float8x4) { - using ComputeType = float; - using RhsType = float; - using BiasType = float; - MaskedSparseMatrix matrix(kSize, kSize, 0.95, kBlockSize, kBlockSize); - matrix.CastWeights(); - FatCacheAlignedVector rhs(kSize, kCols); - CacheAlignedVector bias(kSize); - FatCacheAlignedVector out1(kSize, kCols); - - bias.FillRandom(); - rhs.FillRandom(); - out1.FillZero(); - FatCacheAlignedVector out_reference = out1; - CsrBlockSparseMatrix sparse_matrix(matrix); - SparseLinearLayer sparse_linear_layer( - std::move(sparse_matrix), std::move(bias)); - sparse_linear_layer.PrepareForThreads(1); - sparse_linear_layer.MatVec(rhs, /*relu=*/true, /*tid=*/0, /*replicas=*/1, - /*output_stride=*/0, &out_reference); - sparse_linear_layer.DoubleBlockHeight(); - sparse_linear_layer.PrepareForThreads(1); - sparse_linear_layer.MatVec(rhs, /*relu=*/true, /*tid=*/0, /*replicas=*/1, - /*output_stride=*/0, &out1); - CheckResult(out_reference, out1, kCols); -} - -// Tests that a Layer that has been DoubleBlockHeight()-ed computes the same -// result as original layer. (Fixed16 compute type). -TEST(CsrBlockSparseMatrix, Fixed8x4) { - using ComputeType = csrblocksparse::fixed16<4>; - using RhsType = csrblocksparse::fixed16<4>; - using BiasType = typename TypeOfProduct::type; - MaskedSparseMatrix matrix(kSize, kSize, 0.95, kBlockSize, kBlockSize); - matrix.CastWeights(); - FatCacheAlignedVector rhs(kSize, kCols); - CacheAlignedVector bias(kSize); - FatCacheAlignedVector out1(kSize, kCols); - - bias.FillRandom(); - rhs.FillRandom(); - out1.FillZero(); - FatCacheAlignedVector out_reference = out1; - CsrBlockSparseMatrix sparse_matrix(matrix); - SparseLinearLayer sparse_linear_layer( - std::move(sparse_matrix), std::move(bias)); - sparse_linear_layer.PrepareForThreads(1); - sparse_linear_layer.MatVec(rhs, /*relu=*/false, /*tid=*/0, /*replicas=*/1, - /*output_stride=*/0, &out_reference); - sparse_linear_layer.DoubleBlockHeight(); - sparse_linear_layer.PrepareForThreads(1); - sparse_linear_layer.MatVec(rhs, /*relu=*/false, /*tid=*/0, /*replicas=*/1, - /*output_stride=*/0, &out1); - CheckResult(out_reference, out1, kCols); -} - -TEST(SparseLinearLayerTest, PrintCompiles) { - SparseLinearLayer sparse_linear_layer; - sparse_linear_layer.Print(); -} - -} // namespace -} // namespace csrblocksparse diff --git a/spaces/nus-cs5647-team-5/Mandarin_Tone_Evaluation/download_default_datalist.py b/spaces/nus-cs5647-team-5/Mandarin_Tone_Evaluation/download_default_datalist.py deleted file mode 100644 index b25ce8d65a9cdd1f34efedf543bea6837c71a3ef..0000000000000000000000000000000000000000 --- a/spaces/nus-cs5647-team-5/Mandarin_Tone_Evaluation/download_default_datalist.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -import logging -import json -import requests - -logging.basicConfig( - format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s', - level=logging.INFO) - -DEFAULT_DATALIST_PATH = 'datalist/' -if not os.path.exists(DEFAULT_DATALIST_PATH): - os.makedirs(DEFAULT_DATALIST_PATH) - -URL_DATALIST_INDEX = "https://d.ailemon.net/asrt_assets/datalist/index.json" -rsp_index = requests.get(URL_DATALIST_INDEX) -rsp_index.encoding = 'utf-8' -if rsp_index.ok: - logging.info('Has connected to ailemon\'s download server...') -else: - logging.error('%s%s', 'Can not connected to ailemon\'s download server.', - 'please check your network connection.') - -index_json = json.loads(rsp_index.text) -if index_json['status_code'] != 200: - raise Exception(index_json['status_message']) - -body = 
index_json['body'] -logging.info('start to download datalist from ailemon\'s download server...') - -url_prefix = body['url_prefix'] -for i in range(len(body['datalist'])): - print(i, body['datalist'][i]['name']) -print(len(body['datalist']), 'all datalist') -num = input('Please choose which you select: (default all)') -if len(num) == 0: - num = len(body['datalist']) -else: - num = int(num) - - -def deal_download(datalist_item, url_prefix_str, datalist_path): - ''' - to deal datalist file download - ''' - logging.info('%s%s', 'start to download datalist ', datalist_item['name']) - save_path = os.path.join(datalist_path, datalist_item['name']) - if not os.path.exists(save_path): - os.makedirs(save_path) - logging.info('%s`%s`', 'Created directory ', save_path) - - for filename in datalist_item['filelist']: - tmp_url = url_prefix_str + datalist_item['name'] + '/' + filename - save_filename = os.path.join(save_path, filename) - rsp_listfile = requests.get(tmp_url) - - with open(save_filename, "wb") as file_pointer: - file_pointer.write(rsp_listfile.content) - if rsp_listfile.ok: - logging.info('%s `%s` %s', 'Download', filename, 'complete') - else: - logging.error('%s%s%s%s%s', 'Can not download ', filename, - ' from ailemon\'s download server. ', - 'http status ok is ', str(rsp_listfile.ok)) - - -if num == len(body['datalist']): - for i in range(len(body['datalist'])): - deal_download(body['datalist'][i], body['url_prefix'], DEFAULT_DATALIST_PATH) -else: - deal_download(body['datalist'][num], body['url_prefix'], DEFAULT_DATALIST_PATH) - -logging.info('%s%s%s', 'Datalist files download complete. ', - 'Please remember to download these datasets from ', - body['dataset_download_page_url']) diff --git a/spaces/oguzakif/video-object-remover/SiamMask/experiments/siammask_sharp/test_mask_refine.sh b/spaces/oguzakif/video-object-remover/SiamMask/experiments/siammask_sharp/test_mask_refine.sh deleted file mode 100644 index 0cdcfcfc6e04df41fa76f1050415dc0bb8b6d0fe..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/experiments/siammask_sharp/test_mask_refine.sh +++ /dev/null @@ -1,23 +0,0 @@ -if [ -z "$4" ] - then - echo "Need input parameter!" 
- echo "Usage: bash `basename "$0"` \$CONFIG \$MODEL \$DATASET \$GPUID" - exit -fi - -ROOT=`git rev-parse --show-toplevel` -export PYTHONPATH=$ROOT:$PYTHONPATH - -mkdir -p logs - -config=$1 -model=$2 -dataset=$3 -gpu=$4 - -CUDA_VISIBLE_DEVICES=$gpu python -u $ROOT/tools/test.py \ - --config $config \ - --resume $model \ - --mask --refine \ - --dataset $dataset 2>&1 | tee logs/test_$dataset.log - diff --git a/spaces/oguzakif/video-object-remover/SiamMask/utils/tracker_config.py b/spaces/oguzakif/video-object-remover/SiamMask/utils/tracker_config.py deleted file mode 100644 index 664d01d5f798444ba59f2fd829c304b9b4eeb18b..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/utils/tracker_config.py +++ /dev/null @@ -1,51 +0,0 @@ -# -------------------------------------------------------- -# SiamMask -# Licensed under The MIT License -# Written by Qiang Wang (wangqiang2015 at ia.ac.cn) -# -------------------------------------------------------- -from __future__ import division -from SiamMask.utils.anchors import Anchors - - -class TrackerConfig(object): - # These are the default hyper-params for SiamMask - penalty_k = 0.09 - window_influence = 0.39 - lr = 0.38 - seg_thr = 0.3 # for mask - windowing = 'cosine' # to penalize large displacements [cosine/uniform] - # Params from the network architecture, have to be consistent with the training - exemplar_size = 127 # input z size - instance_size = 255 # input x size (search region) - total_stride = 8 - out_size = 63 # for mask - base_size = 8 - score_size = (instance_size-exemplar_size)//total_stride+1+base_size - context_amount = 0.5 # context amount for the exemplar - ratios = [0.33, 0.5, 1, 2, 3] - scales = [8, ] - anchor_num = len(ratios) * len(scales) - round_dight = 0 - anchor = [] - - def update(self, newparam=None, anchors=None): - if newparam: - for key, value in newparam.items(): - setattr(self, key, value) - if anchors is not None: - if isinstance(anchors, dict): - anchors = Anchors(anchors) - if isinstance(anchors, Anchors): - self.total_stride = anchors.stride - self.ratios = anchors.ratios - self.scales = anchors.scales - self.round_dight = anchors.round_dight - self.renew() - - def renew(self): - self.score_size = (self.instance_size - self.exemplar_size) // self.total_stride + 1 + self.base_size - self.anchor_num = len(self.ratios) * len(self.scales) - - - - diff --git a/spaces/olimpa/CalendarJs/index.html b/spaces/olimpa/CalendarJs/index.html deleted file mode 100644 index e952670e61ab305ca8bc8cfd9098dee6b580cd07..0000000000000000000000000000000000000000 --- a/spaces/olimpa/CalendarJs/index.html +++ /dev/null @@ -1,337 +0,0 @@ - - - - - - - - - - - - - - - - Calendar - - -
      - -
      -
      -

      Calendar demo

      - Fork me on GitHub -
      -
      -
      -
      - - - - -
      -
      -
      - -
      -
      -
      -
      -
      -
      - - - - - - - - \ No newline at end of file diff --git a/spaces/onemriganka/hello_space/README.md b/spaces/onemriganka/hello_space/README.md deleted file mode 100644 index aa17c3b83e4725e892c8d77539cdc8eb407e773b..0000000000000000000000000000000000000000 --- a/spaces/onemriganka/hello_space/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Hello Space -emoji: 🐨 -colorFrom: yellow -colorTo: purple -sdk: streamlit -sdk_version: 1.26.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/owaiskha9654/Custom_Yolov7/models/__init__.py b/spaces/owaiskha9654/Custom_Yolov7/models/__init__.py deleted file mode 100644 index 84952a8167bc2975913a6def6b4f027d566552a9..0000000000000000000000000000000000000000 --- a/spaces/owaiskha9654/Custom_Yolov7/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# init \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/quicktour.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/quicktour.md deleted file mode 100644 index e256f6c932233c793e463bf968056c449bf65a32..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/quicktour.md +++ /dev/null @@ -1,313 +0,0 @@ - -[[open-in-colab]] - -# 훑어보기 - -Diffusion 모델은 이미지나 오디오와 같은 관심 샘플들을 생성하기 위해 랜덤 가우시안 노이즈를 단계별로 제거하도록 학습됩니다. 이로 인해 생성 AI에 대한 관심이 매우 높아졌으며, 인터넷에서 diffusion 생성 이미지의 예를 본 적이 있을 것입니다. 🧨 Diffusers는 누구나 diffusion 모델들을 널리 이용할 수 있도록 하기 위한 라이브러리입니다. - -개발자든 일반 사용자든 이 훑어보기를 통해 🧨 diffusers를 소개하고 빠르게 생성할 수 있도록 도와드립니다! 알아야 할 라이브러리의 주요 구성 요소는 크게 세 가지입니다: - -* [`DiffusionPipeline`]은 추론을 위해 사전 학습된 diffusion 모델에서 샘플을 빠르게 생성하도록 설계된 높은 수준의 엔드투엔드 클래스입니다. -* Diffusion 시스템 생성을 위한 빌딩 블록으로 사용할 수 있는 널리 사용되는 사전 학습된 [model](./api/models) 아키텍처 및 모듈. -* 다양한 [schedulers](./api/schedulers/overview) - 학습을 위해 노이즈를 추가하는 방법과 추론 중에 노이즈 제거된 이미지를 생성하는 방법을 제어하는 알고리즘입니다. - -훑어보기에서는 추론을 위해 [`DiffusionPipeline`]을 사용하는 방법을 보여준 다음, 모델과 스케줄러를 결합하여 [`DiffusionPipeline`] 내부에서 일어나는 일을 복제하는 방법을 안내합니다. - - - -훑어보기는 간결한 버전의 🧨 Diffusers 소개로서 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) 빠르게 시작할 수 있도록 도와드립니다. 디퓨저의 목표, 디자인 철학, 핵심 API에 대한 추가 세부 정보를 자세히 알아보려면 노트북을 확인하세요! - - - -시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: - -```py -# 주석 풀어서 Colab에 필요한 라이브러리 설치하기. -#!pip install --upgrade diffusers accelerate transformers -``` - -- [🤗 Accelerate](https://huggingface.co/docs/accelerate/index)는 추론 및 학습을 위한 모델 로딩 속도를 높여줍니다. -- [🤗 Transformers](https://huggingface.co/docs/transformers/index)는 [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview)과 같이 가장 많이 사용되는 diffusion 모델을 실행하는 데 필요합니다. - -## DiffusionPipeline - -[`DiffusionPipeline`] 은 추론을 위해 사전 학습된 diffusion 시스템을 사용하는 가장 쉬운 방법입니다. 모델과 스케줄러를 포함하는 엔드 투 엔드 시스템입니다. 다양한 작업에 [`DiffusionPipeline`]을 바로 사용할 수 있습니다. 아래 표에서 지원되는 몇 가지 작업을 살펴보고, 지원되는 작업의 전체 목록은 [🧨 Diffusers Summary](./api/pipelines/overview#diffusers-summary) 표에서 확인할 수 있습니다. 
- -| **Task** | **Description** | **Pipeline** -|------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------| -| Unconditional Image Generation | generate an image from Gaussian noise | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | -| Text-Guided Image Generation | generate an image given a text prompt | [conditional_image_generation](./using-diffusers/conditional_image_generation) | -| Text-Guided Image-to-Image Translation | adapt an image guided by a text prompt | [img2img](./using-diffusers/img2img) | -| Text-Guided Image-Inpainting | fill the masked part of an image given the image, the mask and a text prompt | [inpaint](./using-diffusers/inpaint) | -| Text-Guided Depth-to-Image Translation | adapt parts of an image guided by a text prompt while preserving structure via depth estimation | [depth2img](./using-diffusers/depth2img) | - -먼저 [`DiffusionPipeline`]의 인스턴스를 생성하고 다운로드할 파이프라인 체크포인트를 지정합니다. -허깅페이스 허브에 저장된 모든 [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads)에 대해 [`DiffusionPipeline`]을 사용할 수 있습니다. -이 훑어보기에서는 text-to-image 생성을 위한 [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) 체크포인트를 로드합니다. - - - -[Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion) 모델의 경우, 모델을 실행하기 전에 [라이선스](https://huggingface.co/spaces/CompVis/stable-diffusion-license)를 먼저 주의 깊게 읽어주세요. 🧨 Diffusers는 불쾌하거나 유해한 콘텐츠를 방지하기 위해 [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py)를 구현하고 있지만, 모델의 향상된 이미지 생성 기능으로 인해 여전히 잠재적으로 유해한 콘텐츠가 생성될 수 있습니다. - - - -[`~DiffusionPipeline.from_pretrained`] 방법으로 모델 로드하기: - -```python ->>> from diffusers import DiffusionPipeline - ->>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") -``` - -The [`DiffusionPipeline`]은 모든 모델링, 토큰화, 스케줄링 컴포넌트를 다운로드하고 캐시합니다. Stable Diffusion Pipeline은 무엇보다도 [`UNet2DConditionModel`]과 [`PNDMScheduler`]로 구성되어 있음을 알 수 있습니다: - -```py ->>> pipeline -StableDiffusionPipeline { - "_class_name": "StableDiffusionPipeline", - "_diffusers_version": "0.13.1", - ..., - "scheduler": [ - "diffusers", - "PNDMScheduler" - ], - ..., - "unet": [ - "diffusers", - "UNet2DConditionModel" - ], - "vae": [ - "diffusers", - "AutoencoderKL" - ] -} -``` - -이 모델은 약 14억 개의 파라미터로 구성되어 있으므로 GPU에서 파이프라인을 실행할 것을 강력히 권장합니다. -PyTorch에서와 마찬가지로 제너레이터 객체를 GPU로 이동할 수 있습니다: - -```python ->>> pipeline.to("cuda") -``` - -이제 `파이프라인`에 텍스트 프롬프트를 전달하여 이미지를 생성한 다음 노이즈가 제거된 이미지에 액세스할 수 있습니다. 기본적으로 이미지 출력은 [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) 객체로 감싸집니다. - -```python ->>> image = pipeline("An image of a squirrel in Picasso style").images[0] ->>> image -``` - -
      - -
      - -`save`를 호출하여 이미지를 저장합니다: - -```python ->>> image.save("image_of_squirrel_painting.png") -``` - -### 로컬 파이프라인 - -파이프라인을 로컬에서 사용할 수도 있습니다. 유일한 차이점은 가중치를 먼저 다운로드해야 한다는 점입니다: - -```bash -!git lfs install -!git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 -``` - -그런 다음 저장된 가중치를 파이프라인에 로드합니다: - -```python ->>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5") -``` - -이제 위 섹션에서와 같이 파이프라인을 실행할 수 있습니다. - -### 스케줄러 교체 - -스케줄러마다 노이즈 제거 속도와 품질이 서로 다릅니다. 자신에게 가장 적합한 스케줄러를 찾는 가장 좋은 방법은 직접 사용해 보는 것입니다! 🧨 Diffusers의 주요 기능 중 하나는 스케줄러 간에 쉽게 전환이 가능하다는 것입니다. 예를 들어, 기본 스케줄러인 [`PNDMScheduler`]를 [`EulerDiscreteScheduler`]로 바꾸려면, [`~diffusers.ConfigMixin.from_config`] 메서드를 사용하여 로드하세요: - -```py ->>> from diffusers import EulerDiscreteScheduler - ->>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") ->>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) -``` - -새 스케줄러로 이미지를 생성해보고 어떤 차이가 있는지 확인해 보세요! - -다음 섹션에서는 모델과 스케줄러라는 [`DiffusionPipeline`]을 구성하는 컴포넌트를 자세히 살펴보고 이러한 컴포넌트를 사용하여 고양이 이미지를 생성하는 방법을 배워보겠습니다. - -## 모델 - -대부분의 모델은 노이즈가 있는 샘플을 가져와 각 시간 간격마다 노이즈가 적은 이미지와 입력 이미지 사이의 차이인 *노이즈 잔차*(다른 모델은 이전 샘플을 직접 예측하거나 속도 또는 [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)을 예측하는 학습을 합니다)을 예측합니다. 모델을 믹스 앤 매치하여 다른 diffusion 시스템을 만들 수 있습니다. - -모델은 [`~ModelMixin.from_pretrained`] 메서드로 시작되며, 이 메서드는 모델 가중치를 로컬에 캐시하여 다음에 모델을 로드할 때 더 빠르게 로드할 수 있습니다. 훑어보기에서는 고양이 이미지에 대해 학습된 체크포인트가 있는 기본적인 unconditional 이미지 생성 모델인 [`UNet2DModel`]을 로드합니다: - -```py ->>> from diffusers import UNet2DModel - ->>> repo_id = "google/ddpm-cat-256" ->>> model = UNet2DModel.from_pretrained(repo_id) -``` - -모델 매개변수에 액세스하려면 `model.config`를 호출합니다: - -```py ->>> model.config -``` - -모델 구성은 🧊 고정된 🧊 딕셔너리로, 모델이 생성된 후에는 해당 매개 변수들을 변경할 수 없습니다. 이는 의도적인 것으로, 처음에 모델 아키텍처를 정의하는 데 사용된 매개변수는 동일하게 유지하면서 다른 매개변수는 추론 중에 조정할 수 있도록 하기 위한 것입니다. - -가장 중요한 매개변수들은 다음과 같습니다: - -* `sample_size`: 입력 샘플의 높이 및 너비 치수입니다. -* `in_channels`: 입력 샘플의 입력 채널 수입니다. -* `down_block_types` 및 `up_block_types`: UNet 아키텍처를 생성하는 데 사용되는 다운 및 업샘플링 블록의 유형. -* `block_out_channels`: 다운샘플링 블록의 출력 채널 수. 업샘플링 블록의 입력 채널 수에 역순으로 사용되기도 합니다. -* `layers_per_block`: 각 UNet 블록에 존재하는 ResNet 블록의 수입니다. - -추론에 모델을 사용하려면 랜덤 가우시안 노이즈로 이미지 모양을 만듭니다. 모델이 여러 개의 무작위 노이즈를 수신할 수 있으므로 'batch' 축, 입력 채널 수에 해당하는 'channel' 축, 이미지의 높이와 너비를 나타내는 'sample_size' 축이 있어야 합니다: - -```py ->>> import torch - ->>> torch.manual_seed(0) - ->>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) ->>> noisy_sample.shape -torch.Size([1, 3, 256, 256]) -``` - -추론을 위해 모델에 노이즈가 있는 이미지와 `timestep`을 전달합니다. 'timestep'은 입력 이미지의 노이즈 정도를 나타내며, 시작 부분에 더 많은 노이즈가 있고 끝 부분에 더 적은 노이즈가 있습니다. 이를 통해 모델이 diffusion 과정에서 시작 또는 끝에 더 가까운 위치를 결정할 수 있습니다. `sample` 메서드를 사용하여 모델 출력을 얻습니다: - -```py ->>> with torch.no_grad(): -... noisy_residual = model(sample=noisy_sample, timestep=2).sample -``` - -하지만 실제 예를 생성하려면 노이즈 제거 프로세스를 안내할 스케줄러가 필요합니다. 다음 섹션에서는 모델을 스케줄러와 결합하는 방법에 대해 알아봅니다. - -## 스케줄러 - -스케줄러는 모델 출력이 주어졌을 때 노이즈가 많은 샘플에서 노이즈가 적은 샘플로 전환하는 것을 관리합니다 - 이 경우 'noisy_residual'. - - - -🧨 Diffusers는 Diffusion 시스템을 구축하기 위한 툴박스입니다. [`DiffusionPipeline`]을 사용하면 미리 만들어진 Diffusion 시스템을 편리하게 시작할 수 있지만, 모델과 스케줄러 구성 요소를 개별적으로 선택하여 사용자 지정 Diffusion 시스템을 구축할 수도 있습니다. 
- - - -훑어보기의 경우, [`~diffusers.ConfigMixin.from_config`] 메서드를 사용하여 [`DDPMScheduler`]를 인스턴스화합니다: - -```py ->>> from diffusers import DDPMScheduler - ->>> scheduler = DDPMScheduler.from_config(repo_id) ->>> scheduler -DDPMScheduler { - "_class_name": "DDPMScheduler", - "_diffusers_version": "0.13.1", - "beta_end": 0.02, - "beta_schedule": "linear", - "beta_start": 0.0001, - "clip_sample": true, - "clip_sample_range": 1.0, - "num_train_timesteps": 1000, - "prediction_type": "epsilon", - "trained_betas": null, - "variance_type": "fixed_small" -} -``` - - - -💡 스케줄러가 구성에서 어떻게 인스턴스화되는지 주목하세요. 모델과 달리 스케줄러에는 학습 가능한 가중치가 없으며 매개변수도 없습니다! - - - -가장 중요한 매개변수는 다음과 같습니다: - -* `num_train_timesteps`: 노이즈 제거 프로세스의 길이, 즉 랜덤 가우스 노이즈를 데이터 샘플로 처리하는 데 필요한 타임스텝 수입니다. -* `beta_schedule`: 추론 및 학습에 사용할 노이즈 스케줄 유형입니다. -* `beta_start` 및 `beta_end`: 노이즈 스케줄의 시작 및 종료 노이즈 값입니다. - -노이즈가 약간 적은 이미지를 예측하려면 스케줄러의 [`~diffusers.DDPMScheduler.step`] 메서드에 모델 출력, `timestep`, 현재 `sample`을 전달하세요. - -```py ->>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample ->>> less_noisy_sample.shape -``` - -`less_noisy_sample`을 다음 `timestep`으로 넘기면 노이즈가 더 줄어듭니다! 이제 이 모든 것을 한데 모아 전체 노이즈 제거 과정을 시각화해 보겠습니다. - -먼저 노이즈 제거된 이미지를 후처리하여 `PIL.Image`로 표시하는 함수를 만듭니다: - -```py ->>> import PIL.Image ->>> import numpy as np - - ->>> def display_sample(sample, i): -... image_processed = sample.cpu().permute(0, 2, 3, 1) -... image_processed = (image_processed + 1.0) * 127.5 -... image_processed = image_processed.numpy().astype(np.uint8) - -... image_pil = PIL.Image.fromarray(image_processed[0]) -... display(f"Image at step {i}") -... display(image_pil) -``` - -노이즈 제거 프로세스의 속도를 높이려면 입력과 모델을 GPU로 옮기세요: - -```py ->>> model.to("cuda") ->>> noisy_sample = noisy_sample.to("cuda") -``` - -이제 노이즈가 적은 샘플의 잔차를 예측하고 스케줄러로 노이즈가 적은 샘플을 계산하는 노이즈 제거 루프를 생성합니다: - -```py ->>> import tqdm - ->>> sample = noisy_sample - ->>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): -... # 1. predict noise residual -... with torch.no_grad(): -... residual = model(sample, t).sample - -... # 2. compute less noisy image and set x_t -> x_t-1 -... sample = scheduler.step(residual, t, sample).prev_sample - -... # 3. optionally look at image -... if (i + 1) % 50 == 0: -... display_sample(sample, i + 1) -``` - -가만히 앉아서 고양이가 소음으로만 생성되는 것을 지켜보세요!😻 - -
      - -
      - -## 다음 단계 - -이번 훑어보기에서 🧨 Diffusers로 멋진 이미지를 만들어 보셨기를 바랍니다! 다음 단계로 넘어가세요: - -* [training](./tutorials/basic_training) 튜토리얼에서 모델을 학습하거나 파인튜닝하여 나만의 이미지를 생성할 수 있습니다. -* 다양한 사용 사례는 공식 및 커뮤니티 [학습 또는 파인튜닝 스크립트](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) 예시를 참조하세요. -* 스케줄러 로드, 액세스, 변경 및 비교에 대한 자세한 내용은 [다른 스케줄러 사용](./using-diffusers/schedulers) 가이드에서 확인하세요. -* [Stable Diffusion](./stable_diffusion) 가이드에서 프롬프트 엔지니어링, 속도 및 메모리 최적화, 고품질 이미지 생성을 위한 팁과 요령을 살펴보세요. -* [GPU에서 파이토치 최적화](./optimization/fp16) 가이드와 [애플 실리콘(M1/M2)에서의 Stable Diffusion](./optimization/mps) 및 [ONNX 런타임](./optimization/onnx) 실행에 대한 추론 가이드를 통해 🧨 Diffuser 속도를 높이는 방법을 더 자세히 알아보세요. \ No newline at end of file diff --git a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/upsegmodel/prroi_pool/src/prroi_pooling_gpu.c b/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/upsegmodel/prroi_pool/src/prroi_pooling_gpu.c deleted file mode 100644 index 1e652963cdb76fe628d0a33bc270d2c25a0f3770..0000000000000000000000000000000000000000 --- a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/upsegmodel/prroi_pool/src/prroi_pooling_gpu.c +++ /dev/null @@ -1,113 +0,0 @@ -/* - * File : prroi_pooling_gpu.c - * Author : Jiayuan Mao, Tete Xiao - * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com - * Date : 07/13/2018 - * - * Distributed under terms of the MIT license. - * Copyright (c) 2017 Megvii Technology Limited. - */ - -#include -#include - -#include -#include - -#include - -#include "prroi_pooling_gpu_impl.cuh" - - -at::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) { - int nr_rois = rois.size(0); - int nr_channels = features.size(1); - int height = features.size(2); - int width = features.size(3); - int top_count = nr_rois * nr_channels * pooled_height * pooled_width; - auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options()); - - if (output.numel() == 0) { - THCudaCheck(cudaGetLastError()); - return output; - } - - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - PrRoIPoolingForwardGpu( - stream, features.data(), rois.data(), output.data(), - nr_channels, height, width, pooled_height, pooled_width, spatial_scale, - top_count - ); - - THCudaCheck(cudaGetLastError()); - return output; -} - -at::Tensor prroi_pooling_backward_cuda( - const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff, - int pooled_height, int pooled_width, float spatial_scale) { - - auto features_diff = at::zeros_like(features); - - int nr_rois = rois.size(0); - int batch_size = features.size(0); - int nr_channels = features.size(1); - int height = features.size(2); - int width = features.size(3); - int top_count = nr_rois * nr_channels * pooled_height * pooled_width; - int bottom_count = batch_size * nr_channels * height * width; - - if (output.numel() == 0) { - THCudaCheck(cudaGetLastError()); - return features_diff; - } - - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - PrRoIPoolingBackwardGpu( - stream, - features.data(), rois.data(), output.data(), output_diff.data(), - features_diff.data(), - nr_channels, height, width, pooled_height, pooled_width, spatial_scale, - top_count, bottom_count - ); - - THCudaCheck(cudaGetLastError()); - return features_diff; -} - -at::Tensor prroi_pooling_coor_backward_cuda( - const at::Tensor &features, const 
at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff, - int pooled_height, int pooled_width, float spatial_scale) { - - auto coor_diff = at::zeros_like(rois); - - int nr_rois = rois.size(0); - int nr_channels = features.size(1); - int height = features.size(2); - int width = features.size(3); - int top_count = nr_rois * nr_channels * pooled_height * pooled_width; - int bottom_count = nr_rois * 5; - - if (output.numel() == 0) { - THCudaCheck(cudaGetLastError()); - return coor_diff; - } - - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - PrRoIPoolingCoorBackwardGpu( - stream, - features.data(), rois.data(), output.data(), output_diff.data(), - coor_diff.data(), - nr_channels, height, width, pooled_height, pooled_width, spatial_scale, - top_count, bottom_count - ); - - THCudaCheck(cudaGetLastError()); - return coor_diff; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("prroi_pooling_forward_cuda", &prroi_pooling_forward_cuda, "PRRoIPooling_forward"); - m.def("prroi_pooling_backward_cuda", &prroi_pooling_backward_cuda, "PRRoIPooling_backward"); - m.def("prroi_pooling_coor_backward_cuda", &prroi_pooling_coor_backward_cuda, "PRRoIPooling_backward_coor"); -} diff --git a/spaces/perilli/tortoise-tts-v2/utils/stft.py b/spaces/perilli/tortoise-tts-v2/utils/stft.py deleted file mode 100644 index 8de6bfb090c77e0de2c99dd05fde1f8bfd726b51..0000000000000000000000000000000000000000 --- a/spaces/perilli/tortoise-tts-v2/utils/stft.py +++ /dev/null @@ -1,193 +0,0 @@ -""" -BSD 3-Clause License - -Copyright (c) 2017, Prem Seetharaman -All rights reserved. - -* Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -""" - -import torch -import numpy as np -import torch.nn.functional as F -from torch.autograd import Variable -from scipy.signal import get_window -from librosa.util import pad_center, tiny -import librosa.util as librosa_util - - -def window_sumsquare(window, n_frames, hop_length=200, win_length=800, - n_fft=800, dtype=np.float32, norm=None): - """ - # from librosa 0.6 - Compute the sum-square envelope of a window function at a given hop length. 
- - This is used to estimate modulation effects induced by windowing - observations in short-time fourier transforms. - - Parameters - ---------- - window : string, tuple, number, callable, or list-like - Window specification, as in `get_window` - - n_frames : int > 0 - The number of analysis frames - - hop_length : int > 0 - The number of samples to advance between frames - - win_length : [optional] - The length of the window function. By default, this matches `n_fft`. - - n_fft : int > 0 - The length of each analysis frame. - - dtype : np.dtype - The data type of the output - - Returns - ------- - wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` - The sum-squared envelope of the window function - """ - if win_length is None: - win_length = n_fft - - n = n_fft + hop_length * (n_frames - 1) - x = np.zeros(n, dtype=dtype) - - # Compute the squared window at the desired length - win_sq = get_window(window, win_length, fftbins=True) - win_sq = librosa_util.normalize(win_sq, norm=norm)**2 - win_sq = librosa_util.pad_center(win_sq, n_fft) - - # Fill the envelope - for i in range(n_frames): - sample = i * hop_length - x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))] - return x - - -class STFT(torch.nn.Module): - """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" - def __init__(self, filter_length=800, hop_length=200, win_length=800, - window='hann'): - super(STFT, self).__init__() - self.filter_length = filter_length - self.hop_length = hop_length - self.win_length = win_length - self.window = window - self.forward_transform = None - scale = self.filter_length / self.hop_length - fourier_basis = np.fft.fft(np.eye(self.filter_length)) - - cutoff = int((self.filter_length / 2 + 1)) - fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]), - np.imag(fourier_basis[:cutoff, :])]) - - forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) - inverse_basis = torch.FloatTensor( - np.linalg.pinv(scale * fourier_basis).T[:, None, :]) - - if window is not None: - assert(filter_length >= win_length) - # get window and zero center pad it to filter_length - fft_window = get_window(window, win_length, fftbins=True) - fft_window = pad_center(fft_window, filter_length) - fft_window = torch.from_numpy(fft_window).float() - - # window the bases - forward_basis *= fft_window - inverse_basis *= fft_window - - self.register_buffer('forward_basis', forward_basis.float()) - self.register_buffer('inverse_basis', inverse_basis.float()) - - def transform(self, input_data): - num_batches = input_data.size(0) - num_samples = input_data.size(1) - - self.num_samples = num_samples - - # similar to librosa, reflect-pad the input - input_data = input_data.view(num_batches, 1, num_samples) - input_data = F.pad( - input_data.unsqueeze(1), - (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0), - mode='reflect') - input_data = input_data.squeeze(1) - - forward_transform = F.conv1d( - input_data, - Variable(self.forward_basis, requires_grad=False), - stride=self.hop_length, - padding=0) - - cutoff = int((self.filter_length / 2) + 1) - real_part = forward_transform[:, :cutoff, :] - imag_part = forward_transform[:, cutoff:, :] - - magnitude = torch.sqrt(real_part**2 + imag_part**2) - phase = torch.autograd.Variable( - torch.atan2(imag_part.data, real_part.data)) - - return magnitude, phase - - def inverse(self, magnitude, phase): - recombine_magnitude_phase = torch.cat( - [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1) - - 
inverse_transform = F.conv_transpose1d( - recombine_magnitude_phase, - Variable(self.inverse_basis, requires_grad=False), - stride=self.hop_length, - padding=0) - - if self.window is not None: - window_sum = window_sumsquare( - self.window, magnitude.size(-1), hop_length=self.hop_length, - win_length=self.win_length, n_fft=self.filter_length, - dtype=np.float32) - # remove modulation effects - approx_nonzero_indices = torch.from_numpy( - np.where(window_sum > tiny(window_sum))[0]) - window_sum = torch.autograd.Variable( - torch.from_numpy(window_sum), requires_grad=False) - window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum - inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices] - - # scale by hop ratio - inverse_transform *= float(self.filter_length) / self.hop_length - - inverse_transform = inverse_transform[:, :, int(self.filter_length/2):] - inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):] - - return inverse_transform - - def forward(self, input_data): - self.magnitude, self.phase = self.transform(input_data) - reconstruction = self.inverse(self.magnitude, self.phase) - return reconstruction \ No newline at end of file diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/LLM_chatGPT_3_5.py b/spaces/phyloforfun/VoucherVision/vouchervision/LLM_chatGPT_3_5.py deleted file mode 100644 index 48314ec0bf9af5f0cac80fbeb2520b0bb926f19d..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/LLM_chatGPT_3_5.py +++ /dev/null @@ -1,427 +0,0 @@ -import openai -import os, json, sys, inspect, time, requests -from langchain.output_parsers import StructuredOutputParser, ResponseSchema -from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate -from langchain.llms import OpenAI -from langchain.chat_models import ChatOpenAI, AzureChatOpenAI -from langchain.schema import HumanMessage -from general_utils import num_tokens_from_string - -currentdir = os.path.dirname(os.path.abspath( - inspect.getfile(inspect.currentframe()))) -parentdir = os.path.dirname(currentdir) -sys.path.append(parentdir) - -from prompts import PROMPT_UMICH_skeleton_all_asia, PROMPT_OCR_Organized, PROMPT_UMICH_skeleton_all_asia_GPT4, PROMPT_OCR_Organized_GPT4, PROMPT_JSON -from prompt_catalog import PromptCatalog - -RETRY_DELAY = 61 # Wait 60 seconds before retrying -MAX_RETRIES = 5 # Maximum number of retries - - -def azure_call(model, messages): - response = model(messages=messages) - return response - -def OCR_to_dict(is_azure, logger, MODEL, prompt, llm, prompt_version): - for i in range(MAX_RETRIES): - try: - do_use_SOP = True - - if do_use_SOP: - logger.info(f'Waiting for {MODEL} API call --- Using StructuredOutputParser') - response = structured_output_parser(is_azure, MODEL, llm, prompt, logger, prompt_version) - if response is None: - return None - else: - return response['Dictionary'] - - else: - ### Direct GPT ### - logger.info(f'Waiting for {MODEL} API call') - if not is_azure: - response = openai.ChatCompletion.create( - model=MODEL, - temperature = 0, - messages=[ - {"role": "system", "content": "You are a helpful assistant acting as a transcription expert and your job is to transcribe herbarium specimen labels based on OCR data and reformat it to meet Darwin Core Archive Standards into a Python dictionary based on certain rules."}, - {"role": "user", "content": prompt}, - ], - response_format={type: "json_object"},#################################### TODO 
############################################################################### - max_tokens=4096, - ) - # print the model's response - return response.choices[0].message['content'] - else: - msg = HumanMessage( - content=prompt - ) - response = azure_call(llm, [msg]) - return response.content - except Exception as e: - logger.error(f'{e}') - if i < MAX_RETRIES - 1: # No delay needed after the last try - time.sleep(RETRY_DELAY) - else: - raise - -# def OCR_to_dict(logger, MODEL, prompt, OCR, BASE_URL, HEADERS): -# for i in range(MAX_RETRIES): -# try: -# do_use_SOP = False - -# if do_use_SOP: -# logger.info(f'Waiting for {MODEL} API call --- Using StructuredOutputParser -- Content') -# response = structured_output_parser(MODEL, OCR, prompt, logger) -# if response is None: -# return None -# else: -# return response['Dictionary'] - -# else: -# ### Direct GPT through Azure ### -# logger.info(f'Waiting for {MODEL} API call') -# response = azure_gpt_request(prompt, BASE_URL, HEADERS, model_name=MODEL) - -# # Handle the response data. Note: You might need to adjust the following line based on the exact response format of the Azure API. -# content = response.get("choices", [{}])[0].get("message", {}).get("content", "") -# return content -# except requests.exceptions.RequestException as e: # Replace openai.error.APIError with requests exception. -# # Handle HTTP exceptions. You can adjust this based on the Azure API's error responses. -# if e.response.status_code == 502: -# logger.info(f' *** 502 error was encountered, wait and try again ***') -# if i < MAX_RETRIES - 1: -# time.sleep(RETRY_DELAY) -# else: -# raise - - -def OCR_to_dict_16k(is_azure, logger, MODEL, prompt, llm, prompt_version): - for i in range(MAX_RETRIES): - try: - fs = FunctionSchema() - response = openai.ChatCompletion.create( - model=MODEL, - temperature = 0, - messages=[ - {"role": "system", "content": "You are a helpful assistant acting as a transcription expert and your job is to transcribe herbarium specimen labels based on OCR data and reformat it to meet Darwin Core Archive Standards into a Python dictionary based on certain rules."}, - {"role": "user", "content": prompt}, - ], - max_tokens=8000, - function_call= "none", - functions= fs.format_C21_AA_V1() - - ) - # Try to parse the response into JSON - call_failed = False - try: - response_string = response.choices[0].message['content'] - except: - call_failed = True - response_string = prompt - - if not call_failed: - try: - # Try to parse the response into JSON - response_dict = json.loads(response_string) - return response_dict['Dictionary'] - except json.JSONDecodeError: - # If the response is not a valid JSON, call the structured_output_parser_for_function_calls_fail function - logger.info(f'Invalid JSON response, calling structured_output_parser_for_function_calls_fail function') - logger.info(f'Waiting for {MODEL} API call --- Using StructuredOutputParser --- JSON Fixer') - response_sop = structured_output_parser_for_function_calls_fail(is_azure, MODEL, response_string, logger, llm, prompt_version, is_helper=False) - if response_sop is None: - return None - else: - return response_sop['Dictionary'] - else: - try: - logger.info(f'Call Failed. 
Attempting fallback JSON parse without guidance') - logger.info(f'Waiting for {MODEL} API call --- Using StructuredOutputParser --- JSON Fixer') - response_sop = structured_output_parser_for_function_calls_fail(is_azure, MODEL, response_string, logger, llm, prompt_version, is_helper=False) - if response_sop is None: - return None - else: - return response_sop['Dictionary'] - except: - return None - except Exception as e: - # if e.status_code == 401: # or you can check the error message - logger.info(f' *** 401 error was encountered, wait and try again ***') - # If a 401 error was encountered, wait and try again - if i < MAX_RETRIES - 1: # No delay needed after the last try - time.sleep(RETRY_DELAY) - else: - # If it was a different error, re-raise it - raise - -def structured_output_parser(is_azure, MODEL, llm, prompt_template, logger, prompt_version, is_helper=False): - if not is_helper: - response_schemas = [ - ResponseSchema(name="SpeciesName", description="Taxonomic determination, genus_species"), - ResponseSchema(name="Dictionary", description='Formatted JSON object'),]#prompt_template),] - elif is_helper: - response_schemas = [ - ResponseSchema(name="Dictionary", description='Formatted JSON object'),#prompt_template), - ResponseSchema(name="Summary", description="A one sentence summary of the content"),] - - output_parser = StructuredOutputParser.from_response_schemas(response_schemas) - - format_instructions = output_parser.get_format_instructions() - - prompt = ChatPromptTemplate( - messages=[ - HumanMessagePromptTemplate.from_template("Parse the OCR text into the correct structured format.\n{format_instructions}\n{question}") - ], - input_variables=["question"], - partial_variables={"format_instructions": format_instructions} - ) - - # Handle Azure vs OpenAI implementation - if is_azure: - _input = prompt.format_prompt(question=prompt_template) - msg = HumanMessage(content=_input.to_string()) - output = azure_call(llm, [msg]) - else: - chat_model = ChatOpenAI(temperature=0, model=MODEL) - _input = prompt.format_prompt(question=prompt_template) - output = chat_model(_input.to_messages()) - - # Log token length if running with Gradio - try: - nt = num_tokens_from_string(_input.to_string(), "cl100k_base") - logger.info(f'Prompt token length --- {nt}') - except: - pass - - # Parse the output - try: - # Check if output is of type 'ai' and parse accordingly - if output.type == 'ai': - parsed_content = output.content - logger.info(f'Formatted JSON\n{parsed_content}') - else: - # If not 'ai', log and set parsed_content to None or a default value - logger.error('Output type is not "ai". 
Unable to parse.') - return None - - # Clean up the parsed content - parsed_content = parsed_content.replace('\n', "").replace('\t', "").replace('|', "") - - # Attempt to parse the cleaned content - try: - refined_response = output_parser.parse(parsed_content) - return refined_response - except Exception as parse_error: - # Handle parsing errors specifically - logger.error(f'Parsing Error: {parse_error}') - return structured_output_parser_for_function_calls_fail(is_azure, MODEL, parsed_content, logger, llm, prompt_version, is_helper) - - except Exception as e: - # Handle any other exceptions that might occur - logger.error(f'Unexpected Error: {e}') - return None - -def structured_output_parser_for_function_calls_fail(is_azure, MODEL, failed_response, logger, llm, prompt_version, is_helper=False, try_ind=0): - if try_ind == 0: - original_failed_response = failed_response - if try_ind > 5: - return None - - # prompt_redo = PROMPT_JSON('helper' if is_helper else 'dict', failed_response) - Prompt = PromptCatalog() - if prompt_version in ['prompt_v1_verbose', 'prompt_v1_verbose_noDomainKnowledge']: - prompt_redo = Prompt.prompt_gpt_redo_v1(failed_response) - elif prompt_version in ['prompt_v2_json_rules']: - prompt_redo = Prompt.prompt_gpt_redo_v2(failed_response) - else: - prompt_redo = Prompt.prompt_v2_custom_redo(failed_response, is_palm=False) - - response_schemas = [ - ResponseSchema(name="Summary", description="A one sentence summary of the content"), - ResponseSchema(name="Dictionary", description='Formatted JSON object') - ] - - output_parser = StructuredOutputParser.from_response_schemas(response_schemas) - format_instructions = output_parser.get_format_instructions() - - prompt = ChatPromptTemplate( - messages=[ - HumanMessagePromptTemplate.from_template("The following text contains JSON formatted text, but there is an error that you need to correct.\n{format_instructions}\n{question}") - ], - input_variables=["question"], - partial_variables={"format_instructions": format_instructions} - ) - - _input = prompt.format_prompt(question=prompt_redo) - - # Log token length if running with Gradio - try: - nt = num_tokens_from_string(_input.to_string(), "cl100k_base") - logger.info(f'Prompt Redo token length --- {nt}') - except: - pass - - if is_azure: - msg = HumanMessage(content=_input.to_string()) - output = azure_call(llm, [msg]) - else: - chat_model = ChatOpenAI(temperature=0, model=MODEL) - output = chat_model(_input.to_messages()) - - try: - refined_response = output_parser.parse(output.content) - except json.decoder.JSONDecodeError as e: - try_ind += 1 - error_message = str(e) - redo_content = f'The error messsage is: {error_message}\nThe broken JSON object is: {original_failed_response}' # Use original_failed_response here - logger.info(f'[Failed JSON Object]\n{original_failed_response}') # And here - refined_response = structured_output_parser_for_function_calls_fail( - is_azure, MODEL, redo_content, logger, llm, prompt_version, is_helper, try_ind, original_failed_response - ) - except: - try_ind += 1 - logger.info(f'[Failed JSON Object]\n{original_failed_response}') # And here - refined_response = structured_output_parser_for_function_calls_fail( - is_azure, MODEL, original_failed_response, logger, llm, prompt_version, is_helper, try_ind, original_failed_response - ) - - return refined_response - - - - -class FunctionSchema: - def __init__(self): - pass - - def format_C21_AA_V1(self): - return [ - { - "name": "format_C21_AA_V1", - "description": "Format the given data into a 
specific dictionary", - "parameters": { - "type": "object", - "properties": {}, # specify parameters here if your function requires any - "required": [] # list of required parameters - }, - "output_type": "json", - "output_schema": { - "type": "object", - "properties": { - "Dictionary": { - "type": "object", - "properties": { - "Catalog Number": {"type": "array", "items": {"type": "string"}}, - "Genus": {"type": "array", "items": {"type": "string"}}, - "Species": {"type": "array", "items": {"type": "string"}}, - "subspecies": {"type": "array", "items": {"type": "string"}}, - "variety": {"type": "array", "items": {"type": "string"}}, - "forma": {"type": "array", "items": {"type": "string"}}, - "Country": {"type": "array", "items": {"type": "string"}}, - "State": {"type": "array", "items": {"type": "string"}}, - "County": {"type": "array", "items": {"type": "string"}}, - "Locality Name": {"type": "array", "items": {"type": "string"}}, - "Min Elevation": {"type": "array", "items": {"type": "string"}}, - "Max Elevation": {"type": "array", "items": {"type": "string"}}, - "Elevation Units": {"type": "array", "items": {"type": "string"}}, - "Verbatim Coordinates": {"type": "array", "items": {"type": "string"}}, - "Datum": {"type": "array", "items": {"type": "string"}}, - "Cultivated": {"type": "array", "items": {"type": "string"}}, - "Habitat": {"type": "array", "items": {"type": "string"}}, - "Collectors": {"type": "array", "items": {"type": "string"}}, - "Collector Number": {"type": "array", "items": {"type": "string"}}, - "Verbatim Date": {"type": "array", "items": {"type": "string"}}, - "Date": {"type": "array", "items": {"type": "string"}}, - "End Date": {"type": "array", "items": {"type": "string"}} - } - }, - "SpeciesName": { - "type": "object", - "properties": { - "taxonomy": {"type": "array", "items": {"type": "string"}} - } - } - } - } - } - ] - - def format_C21_AA_V1_helper(self): - return [ - { - "name": "format_C21_AA_V1_helper", - "description": "Helper function for format_C21_AA_V1 to further format the given data", - "parameters": { - "type": "object", - "properties": {}, # specify parameters here if your function requires any - "required": [] # list of required parameters - }, - "output_type": "json", - "output_schema": { - "type": "object", - "properties": { - "Dictionary": { - "type": "object", - "properties": { - "TAXONOMY": { - "type": "object", - "properties": { - "Order": {"type": "array", "items": {"type": "string"}}, - "Family": {"type": "array", "items": {"type": "string"}}, - "Genus":{"type": "array", "items": {"type": "string"}}, - "Species": {"type": "array", "items": {"type": "string"}}, - "Subspecies": {"type": "array", "items": {"type": "string"}}, - "Variety": {"type": "array", "items": {"type": "string"}}, - "Forma": {"type": "array", "items": {"type": "string"}}, - } - }, - "GEOGRAPHY": { - "type": "object", - "properties": { - "Country": {"type": "array", "items": {"type": "string"}}, - "State": {"type": "array", "items": {"type": "string"}}, - "Prefecture": {"type": "array", "items": {"type": "string"}}, - "Province": {"type": "array", "items": {"type": "string"}}, - "District": {"type": "array", "items": {"type": "string"}}, - "County": {"type": "array", "items": {"type": "string"}}, - "City": {"type": "array", "items": {"type": "string"}}, - "Administrative Division": {"type": "array", "items": {"type": "string"}}, - } - }, - "LOCALITY": { - "type": "object", - "properties": { - "Landscape": {"type": "array", "items": {"type": "string"}}, - "Nearby Places": 
{"type": "array", "items": {"type": "string"}}, - } - }, - "COLLECTING": { - "type": "object", - "properties": { - "Collector": {"type": "array", "items": {"type": "string"}}, - "Collector's Number": {"type": "array", "items": {"type": "string"}}, - "Verbatim Date": {"type": "array", "items": {"type": "string"}}, - "Formatted Date": {"type": "array", "items": {"type": "string"}}, - "Cultivation Status": {"type": "array", "items": {"type": "string"}}, - "Habitat Description": {"type": "array", "items": {"type": "string"}}, - } - }, - "MISCELLANEOUS": { - "type": "object", - "properties": { - "Additional Information": {"type": "array", "items": {"type": "string"}}, - } - } - } - }, - "Summary": { - "type": "object", - "properties": { - "Content Summary": {"type": "array", "items": {"type": "string"}} - } - } - } - } - } - ] diff --git a/spaces/pinecone/extractive-question-answering/README.md b/spaces/pinecone/extractive-question-answering/README.md deleted file mode 100644 index 745317bc3d8b7c3767c9408929ad46cadaa4251c..0000000000000000000000000000000000000000 --- a/spaces/pinecone/extractive-question-answering/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Extractive Question Answering -emoji: 🦀 -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/base_command.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/base_command.py deleted file mode 100644 index 6a3b8e6c213a9a8069b38729ab3a0c16a213ce62..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/base_command.py +++ /dev/null @@ -1,236 +0,0 @@ -"""Base Command class, and related routines""" - -import functools -import logging -import logging.config -import optparse -import os -import sys -import traceback -from optparse import Values -from typing import Any, Callable, List, Optional, Tuple - -from pip._vendor.rich import traceback as rich_traceback - -from pip._internal.cli import cmdoptions -from pip._internal.cli.command_context import CommandContextMixIn -from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter -from pip._internal.cli.status_codes import ( - ERROR, - PREVIOUS_BUILD_DIR_ERROR, - UNKNOWN_ERROR, - VIRTUALENV_NOT_FOUND, -) -from pip._internal.exceptions import ( - BadCommand, - CommandError, - DiagnosticPipError, - InstallationError, - NetworkConnectionError, - PreviousBuildDirError, - UninstallationError, -) -from pip._internal.utils.filesystem import check_path_owner -from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging -from pip._internal.utils.misc import get_prog, normalize_path -from pip._internal.utils.temp_dir import TempDirectoryTypeRegistry as TempDirRegistry -from pip._internal.utils.temp_dir import global_tempdir_manager, tempdir_registry -from pip._internal.utils.virtualenv import running_under_virtualenv - -__all__ = ["Command"] - -logger = logging.getLogger(__name__) - - -class Command(CommandContextMixIn): - usage: str = "" - ignore_require_venv: bool = False - - def __init__(self, name: str, summary: str, isolated: bool = False) -> None: - super().__init__() - - self.name = name - self.summary = summary - self.parser = ConfigOptionParser( - usage=self.usage, - prog=f"{get_prog()} {name}", - 
formatter=UpdatingDefaultsHelpFormatter(), - add_help_option=False, - name=name, - description=self.__doc__, - isolated=isolated, - ) - - self.tempdir_registry: Optional[TempDirRegistry] = None - - # Commands should add options to this option group - optgroup_name = f"{self.name.capitalize()} Options" - self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name) - - # Add the general options - gen_opts = cmdoptions.make_option_group( - cmdoptions.general_group, - self.parser, - ) - self.parser.add_option_group(gen_opts) - - self.add_options() - - def add_options(self) -> None: - pass - - def handle_pip_version_check(self, options: Values) -> None: - """ - This is a no-op so that commands by default do not do the pip version - check. - """ - # Make sure we do the pip version check if the index_group options - # are present. - assert not hasattr(options, "no_index") - - def run(self, options: Values, args: List[str]) -> int: - raise NotImplementedError - - def parse_args(self, args: List[str]) -> Tuple[Values, List[str]]: - # factored out for testability - return self.parser.parse_args(args) - - def main(self, args: List[str]) -> int: - try: - with self.main_context(): - return self._main(args) - finally: - logging.shutdown() - - def _main(self, args: List[str]) -> int: - # We must initialize this before the tempdir manager, otherwise the - # configuration would not be accessible by the time we clean up the - # tempdir manager. - self.tempdir_registry = self.enter_context(tempdir_registry()) - # Intentionally set as early as possible so globally-managed temporary - # directories are available to the rest of the code. - self.enter_context(global_tempdir_manager()) - - options, args = self.parse_args(args) - - # Set verbosity so that it can be used elsewhere. - self.verbosity = options.verbose - options.quiet - - level_number = setup_logging( - verbosity=self.verbosity, - no_color=options.no_color, - user_log_file=options.log, - ) - - always_enabled_features = set(options.features_enabled) & set( - cmdoptions.ALWAYS_ENABLED_FEATURES - ) - if always_enabled_features: - logger.warning( - "The following features are always enabled: %s. ", - ", ".join(sorted(always_enabled_features)), - ) - - # Make sure that the --python argument isn't specified after the - # subcommand. We can tell, because if --python was specified, - # we should only reach this point if we're running in the created - # subprocess, which has the _PIP_RUNNING_IN_SUBPROCESS environment - # variable set. - if options.python and "_PIP_RUNNING_IN_SUBPROCESS" not in os.environ: - logger.critical( - "The --python option must be placed before the pip subcommand name" - ) - sys.exit(ERROR) - - # TODO: Try to get these passing down from the command? - # without resorting to os.environ to hold these. - # This also affects isolated builds and it should. - - if options.no_input: - os.environ["PIP_NO_INPUT"] = "1" - - if options.exists_action: - os.environ["PIP_EXISTS_ACTION"] = " ".join(options.exists_action) - - if options.require_venv and not self.ignore_require_venv: - # If a venv is required check if it can really be found - if not running_under_virtualenv(): - logger.critical("Could not find an activated virtualenv (required).") - sys.exit(VIRTUALENV_NOT_FOUND) - - if options.cache_dir: - options.cache_dir = normalize_path(options.cache_dir) - if not check_path_owner(options.cache_dir): - logger.warning( - "The directory '%s' or its parent directory is not owned " - "or is not writable by the current user. 
The cache " - "has been disabled. Check the permissions and owner of " - "that directory. If executing pip with sudo, you should " - "use sudo's -H flag.", - options.cache_dir, - ) - options.cache_dir = None - - def intercepts_unhandled_exc( - run_func: Callable[..., int] - ) -> Callable[..., int]: - @functools.wraps(run_func) - def exc_logging_wrapper(*args: Any) -> int: - try: - status = run_func(*args) - assert isinstance(status, int) - return status - except DiagnosticPipError as exc: - logger.error("[present-rich] %s", exc) - logger.debug("Exception information:", exc_info=True) - - return ERROR - except PreviousBuildDirError as exc: - logger.critical(str(exc)) - logger.debug("Exception information:", exc_info=True) - - return PREVIOUS_BUILD_DIR_ERROR - except ( - InstallationError, - UninstallationError, - BadCommand, - NetworkConnectionError, - ) as exc: - logger.critical(str(exc)) - logger.debug("Exception information:", exc_info=True) - - return ERROR - except CommandError as exc: - logger.critical("%s", exc) - logger.debug("Exception information:", exc_info=True) - - return ERROR - except BrokenStdoutLoggingError: - # Bypass our logger and write any remaining messages to - # stderr because stdout no longer works. - print("ERROR: Pipe to stdout was broken", file=sys.stderr) - if level_number <= logging.DEBUG: - traceback.print_exc(file=sys.stderr) - - return ERROR - except KeyboardInterrupt: - logger.critical("Operation cancelled by user") - logger.debug("Exception information:", exc_info=True) - - return ERROR - except BaseException: - logger.critical("Exception:", exc_info=True) - - return UNKNOWN_ERROR - - return exc_logging_wrapper - - try: - if not options.debug_mode: - run = intercepts_unhandled_exc(self.run) - else: - run = self.run - rich_traceback.install(show_locals=True) - return run(options, args) - finally: - self.handle_pip_version_check(options) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/clean.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/clean.py deleted file mode 100644 index 9413f7cfcb40a0eed0af78347f7d60ed367c2738..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/clean.py +++ /dev/null @@ -1,75 +0,0 @@ -"""distutils.command.clean - -Implements the Distutils 'clean' command.""" - -# contributed by Bastian Kleineidam , added 2000-03-18 - -import os -from ..core import Command -from ..dir_util import remove_tree -from distutils._log import log - - -class clean(Command): - description = "clean up temporary files from 'build' command" - user_options = [ - ('build-base=', 'b', "base build directory (default: 'build.build-base')"), - ( - 'build-lib=', - None, - "build directory for all modules (default: 'build.build-lib')", - ), - ('build-temp=', 't', "temporary build directory (default: 'build.build-temp')"), - ( - 'build-scripts=', - None, - "build directory for scripts (default: 'build.build-scripts')", - ), - ('bdist-base=', None, "temporary directory for built distributions"), - ('all', 'a', "remove all build output, not just temporary by-products"), - ] - - boolean_options = ['all'] - - def initialize_options(self): - self.build_base = None - self.build_lib = None - self.build_temp = None - self.build_scripts = None - self.bdist_base = None - self.all = None - - def finalize_options(self): - self.set_undefined_options( - 'build', - ('build_base', 'build_base'), 
- ('build_lib', 'build_lib'), - ('build_scripts', 'build_scripts'), - ('build_temp', 'build_temp'), - ) - self.set_undefined_options('bdist', ('bdist_base', 'bdist_base')) - - def run(self): - # remove the build/temp. directory (unless it's already - # gone) - if os.path.exists(self.build_temp): - remove_tree(self.build_temp, dry_run=self.dry_run) - else: - log.debug("'%s' does not exist -- can't clean it", self.build_temp) - - if self.all: - # remove build directories - for directory in (self.build_lib, self.bdist_base, self.build_scripts): - if os.path.exists(directory): - remove_tree(directory, dry_run=self.dry_run) - else: - log.warning("'%s' does not exist -- can't clean it", directory) - - # just for the heck of it, try to remove the base build directory: - # we might have emptied it right now, but if not we don't care - if not self.dry_run: - try: - os.rmdir(self.build_base) - log.info("removing '%s'", self.build_base) - except OSError: - pass diff --git a/spaces/plzdontcry/dakubettergpt/src/components/Menu/NewFolder.tsx b/spaces/plzdontcry/dakubettergpt/src/components/Menu/NewFolder.tsx deleted file mode 100644 index 443bf7f904d5961f11dce6b930770ac8cd043669..0000000000000000000000000000000000000000 --- a/spaces/plzdontcry/dakubettergpt/src/components/Menu/NewFolder.tsx +++ /dev/null @@ -1,60 +0,0 @@ -import React from 'react'; -import { useTranslation } from 'react-i18next'; -import { v4 as uuidv4 } from 'uuid'; -import useStore from '@store/store'; - -import NewFolderIcon from '@icon/NewFolderIcon'; -import { Folder, FolderCollection } from '@type/chat'; - -const NewFolder = () => { - const { t } = useTranslation(); - const generating = useStore((state) => state.generating); - const setFolders = useStore((state) => state.setFolders); - - const addFolder = () => { - let folderIndex = 1; - let name = `New Folder ${folderIndex}`; - - const folders = useStore.getState().folders; - - while (Object.values(folders).some((folder) => folder.name === name)) { - folderIndex += 1; - name = `New Folder ${folderIndex}`; - } - - const updatedFolders: FolderCollection = JSON.parse( - JSON.stringify(folders) - ); - - const id = uuidv4(); - const newFolder: Folder = { - id, - name, - expanded: false, - order: 0, - }; - - Object.values(updatedFolders).forEach((folder) => { - folder.order += 1; - }); - - setFolders({ [id]: newFolder, ...updatedFolders }); - }; - - return ( - { - if (!generating) addFolder(); - }} - > - - - ); -}; - -export default NewFolder; diff --git a/spaces/ppsantiago/chatGPT/run_macOS.command b/spaces/ppsantiago/chatGPT/run_macOS.command deleted file mode 100644 index 62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000 --- a/spaces/ppsantiago/chatGPT/run_macOS.command +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# 获取脚本所在目录 -script_dir=$(dirname "$0") - -# 将工作目录更改为脚本所在目录 -cd "$script_dir" - -# 检查Git仓库是否有更新 -git remote update -pwd - -if ! 
git status -uno | grep 'up to date' > /dev/null; then - # 如果有更新,关闭当前运行的服务器 - pkill -f ChuanhuChatbot.py - - # 拉取最新更改 - git pull - - # 安装依赖 - pip3 install -r requirements.txt - - # 重新启动服务器 - nohup python3 ChuanhuChatbot.py & -fi diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/svelte/svelte.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/svelte/svelte.js deleted file mode 100644 index c0fa323934575eecdc060f5967ddc7a25bf9df55..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/svelte/svelte.js +++ /dev/null @@ -1,3975 +0,0 @@ -/** @returns {void} */ -function noop() {} - -const identity = (x) => x; - -/** - * @template T - * @template S - * @param {T} tar - * @param {S} src - * @returns {T & S} - */ -function assign(tar, src) { - // @ts-ignore - for (const k in src) tar[k] = src[k]; - return /** @type {T & S} */ (tar); -} - -// Adapted from https://github.com/then/is-promise/blob/master/index.js -// Distributed under MIT License https://github.com/then/is-promise/blob/master/LICENSE -/** - * @param {any} value - * @returns {value is PromiseLike} - */ -function is_promise(value) { - return ( - !!value && - (typeof value === 'object' || typeof value === 'function') && - typeof (/** @type {any} */ (value).then) === 'function' - ); -} - -/** @returns {void} */ -function add_location(element, file, line, column, char) { - element.__svelte_meta = { - loc: { file, line, column, char } - }; -} - -function run(fn) { - return fn(); -} - -function blank_object() { - return Object.create(null); -} - -/** - * @param {Function[]} fns - * @returns {void} - */ -function run_all(fns) { - fns.forEach(run); -} - -/** - * @param {any} thing - * @returns {thing is Function} - */ -function is_function(thing) { - return typeof thing === 'function'; -} - -/** @returns {boolean} */ -function safe_not_equal(a, b) { - return a != a ? b == b : a !== b || (a && typeof a === 'object') || typeof a === 'function'; -} - -let src_url_equal_anchor; - -/** - * @param {string} element_src - * @param {string} url - * @returns {boolean} - */ -function src_url_equal(element_src, url) { - if (element_src === url) return true; - if (!src_url_equal_anchor) { - src_url_equal_anchor = document.createElement('a'); - } - // This is actually faster than doing URL(..).href - src_url_equal_anchor.href = url; - return element_src === src_url_equal_anchor.href; -} - -/** @param {string} srcset */ -function split_srcset(srcset) { - return srcset.split(',').map((src) => src.trim().split(' ').filter(Boolean)); -} - -/** - * @param {HTMLSourceElement | HTMLImageElement} element_srcset - * @param {string | undefined | null} srcset - * @returns {boolean} - */ -function srcset_url_equal(element_srcset, srcset) { - const element_urls = split_srcset(element_srcset.srcset); - const urls = split_srcset(srcset || ''); - - return ( - urls.length === element_urls.length && - urls.every( - ([url, width], i) => - width === element_urls[i][1] && - // We need to test both ways because Vite will create an a full URL with - // `new URL(asset, import.meta.url).href` for the client when `base: './'`, and the - // relative URLs inside srcset are not automatically resolved to absolute URLs by - // browsers (in contrast to img.src). This means both SSR and DOM code could - // contain relative or absolute URLs. 
- (src_url_equal(element_urls[i][0], url) || src_url_equal(url, element_urls[i][0])) - ) - ); -} - -/** @returns {boolean} */ -function not_equal(a, b) { - return a != a ? b == b : a !== b; -} - -/** @returns {boolean} */ -function is_empty(obj) { - return Object.keys(obj).length === 0; -} - -/** @returns {void} */ -function validate_store(store, name) { - if (store != null && typeof store.subscribe !== 'function') { - throw new Error(`'${name}' is not a store with a 'subscribe' method`); - } -} - -function subscribe(store, ...callbacks) { - if (store == null) { - for (const callback of callbacks) { - callback(undefined); - } - return noop; - } - const unsub = store.subscribe(...callbacks); - return unsub.unsubscribe ? () => unsub.unsubscribe() : unsub; -} - -/** - * Get the current value from a store by subscribing and immediately unsubscribing. - * - * https://svelte.dev/docs/svelte-store#get - * @template T - * @param {import('../store/public.js').Readable} store - * @returns {T} - */ -function get_store_value(store) { - let value; - subscribe(store, (_) => (value = _))(); - return value; -} - -/** @returns {void} */ -function component_subscribe(component, store, callback) { - component.$$.on_destroy.push(subscribe(store, callback)); -} - -function create_slot(definition, ctx, $$scope, fn) { - if (definition) { - const slot_ctx = get_slot_context(definition, ctx, $$scope, fn); - return definition[0](slot_ctx); - } -} - -function get_slot_context(definition, ctx, $$scope, fn) { - return definition[1] && fn ? assign($$scope.ctx.slice(), definition[1](fn(ctx))) : $$scope.ctx; -} - -function get_slot_changes(definition, $$scope, dirty, fn) { - if (definition[2] && fn) { - const lets = definition[2](fn(dirty)); - if ($$scope.dirty === undefined) { - return lets; - } - if (typeof lets === 'object') { - const merged = []; - const len = Math.max($$scope.dirty.length, lets.length); - for (let i = 0; i < len; i += 1) { - merged[i] = $$scope.dirty[i] | lets[i]; - } - return merged; - } - return $$scope.dirty | lets; - } - return $$scope.dirty; -} - -/** @returns {void} */ -function update_slot_base( - slot, - slot_definition, - ctx, - $$scope, - slot_changes, - get_slot_context_fn -) { - if (slot_changes) { - const slot_context = get_slot_context(slot_definition, ctx, $$scope, get_slot_context_fn); - slot.p(slot_context, slot_changes); - } -} - -/** @returns {void} */ -function update_slot( - slot, - slot_definition, - ctx, - $$scope, - dirty, - get_slot_changes_fn, - get_slot_context_fn -) { - const slot_changes = get_slot_changes(slot_definition, $$scope, dirty, get_slot_changes_fn); - update_slot_base(slot, slot_definition, ctx, $$scope, slot_changes, get_slot_context_fn); -} - -/** @returns {any[] | -1} */ -function get_all_dirty_from_scope($$scope) { - if ($$scope.ctx.length > 32) { - const dirty = []; - const length = $$scope.ctx.length / 32; - for (let i = 0; i < length; i++) { - dirty[i] = -1; - } - return dirty; - } - return -1; -} - -/** @returns {{}} */ -function exclude_internal_props(props) { - const result = {}; - for (const k in props) if (k[0] !== '$') result[k] = props[k]; - return result; -} - -/** @returns {{}} */ -function compute_rest_props(props, keys) { - const rest = {}; - keys = new Set(keys); - for (const k in props) if (!keys.has(k) && k[0] !== '$') rest[k] = props[k]; - return rest; -} - -/** @returns {{}} */ -function compute_slots(slots) { - const result = {}; - for (const key in slots) { - result[key] = true; - } - return result; -} - -/** @returns {(this: any, 
...args: any[]) => void} */ -function once(fn) { - let ran = false; - return function (...args) { - if (ran) return; - ran = true; - fn.call(this, ...args); - }; -} - -function null_to_empty(value) { - return value == null ? '' : value; -} - -function set_store_value(store, ret, value) { - store.set(value); - return ret; -} - -const has_prop = (obj, prop) => Object.prototype.hasOwnProperty.call(obj, prop); - -function action_destroyer(action_result) { - return action_result && is_function(action_result.destroy) ? action_result.destroy : noop; -} - -/** @param {number | string} value - * @returns {[number, string]} - */ -function split_css_unit(value) { - const split = typeof value === 'string' && value.match(/^\s*(-?[\d.]+)([^\s]*)\s*$/); - return split ? [parseFloat(split[1]), split[2] || 'px'] : [/** @type {number} */ (value), 'px']; -} - -const contenteditable_truthy_values = ['', true, 1, 'true', 'contenteditable']; - -const is_client = typeof window !== 'undefined'; - -/** @type {() => number} */ -let now = is_client ? () => window.performance.now() : () => Date.now(); - -let raf = is_client ? (cb) => requestAnimationFrame(cb) : noop; - -// used internally for testing -/** @returns {void} */ -function set_now(fn) { - now = fn; -} - -/** @returns {void} */ -function set_raf(fn) { - raf = fn; -} - -const tasks = new Set(); - -/** - * @param {number} now - * @returns {void} - */ -function run_tasks(now) { - tasks.forEach((task) => { - if (!task.c(now)) { - tasks.delete(task); - task.f(); - } - }); - if (tasks.size !== 0) raf(run_tasks); -} - -/** - * For testing purposes only! - * @returns {void} - */ -function clear_loops() { - tasks.clear(); -} - -/** - * Creates a new task that runs on each raf frame - * until it returns a falsy value or is aborted - * @param {import('./private.js').TaskCallback} callback - * @returns {import('./private.js').Task} - */ -function loop(callback) { - /** @type {import('./private.js').TaskEntry} */ - let task; - if (tasks.size === 0) raf(run_tasks); - return { - promise: new Promise((fulfill) => { - tasks.add((task = { c: callback, f: fulfill })); - }), - abort() { - tasks.delete(task); - } - }; -} - -/** @type {typeof globalThis} */ -const globals = - typeof window !== 'undefined' - ? window - : typeof globalThis !== 'undefined' - ? globalThis - : // @ts-ignore Node typings have this - global; - -/** - * Resize observer singleton. - * One listener per element only! - * https://groups.google.com/a/chromium.org/g/blink-dev/c/z6ienONUb5A/m/F5-VcUZtBAAJ - */ -class ResizeObserverSingleton { - /** - * @private - * @readonly - * @type {WeakMap} - */ - _listeners = 'WeakMap' in globals ? new WeakMap() : undefined; - - /** - * @private - * @type {ResizeObserver} - */ - _observer = undefined; - - /** @type {ResizeObserverOptions} */ - options; - - /** @param {ResizeObserverOptions} options */ - constructor(options) { - this.options = options; - } - - /** - * @param {Element} element - * @param {import('./private.js').Listener} listener - * @returns {() => void} - */ - observe(element, listener) { - this._listeners.set(element, listener); - this._getObserver().observe(element, this.options); - return () => { - this._listeners.delete(element); - this._observer.unobserve(element); // this line can probably be removed - }; - } - - /** - * @private - */ - _getObserver() { - return ( - this._observer ?? 
- (this._observer = new ResizeObserver((entries) => { - for (const entry of entries) { - ResizeObserverSingleton.entries.set(entry.target, entry); - this._listeners.get(entry.target)?.(entry); - } - })) - ); - } -} - -// Needs to be written like this to pass the tree-shake-test -ResizeObserverSingleton.entries = 'WeakMap' in globals ? new WeakMap() : undefined; - -// Track which nodes are claimed during hydration. Unclaimed nodes can then be removed from the DOM -// at the end of hydration without touching the remaining nodes. -let is_hydrating = false; - -/** - * @returns {void} - */ -function start_hydrating() { - is_hydrating = true; -} - -/** - * @returns {void} - */ -function end_hydrating() { - is_hydrating = false; -} - -/** - * @param {number} low - * @param {number} high - * @param {(index: number) => number} key - * @param {number} value - * @returns {number} - */ -function upper_bound(low, high, key, value) { - // Return first index of value larger than input value in the range [low, high) - while (low < high) { - const mid = low + ((high - low) >> 1); - if (key(mid) <= value) { - low = mid + 1; - } else { - high = mid; - } - } - return low; -} - -/** - * @param {NodeEx} target - * @returns {void} - */ -function init_hydrate(target) { - if (target.hydrate_init) return; - target.hydrate_init = true; - // We know that all children have claim_order values since the unclaimed have been detached if target is not - - let children = /** @type {ArrayLike} */ (target.childNodes); - // If target is , there may be children without claim_order - if (target.nodeName === 'HEAD') { - const my_children = []; - for (let i = 0; i < children.length; i++) { - const node = children[i]; - if (node.claim_order !== undefined) { - my_children.push(node); - } - } - children = my_children; - } - /* - * Reorder claimed children optimally. - * We can reorder claimed children optimally by finding the longest subsequence of - * nodes that are already claimed in order and only moving the rest. The longest - * subsequence of nodes that are claimed in order can be found by - * computing the longest increasing subsequence of .claim_order values. - * - * This algorithm is optimal in generating the least amount of reorder operations - * possible. - * - * Proof: - * We know that, given a set of reordering operations, the nodes that do not move - * always form an increasing subsequence, since they do not move among each other - * meaning that they must be already ordered among each other. Thus, the maximal - * set of nodes that do not move form a longest increasing subsequence. - */ - // Compute longest increasing subsequence - // m: subsequence length j => index k of smallest value that ends an increasing subsequence of length j - const m = new Int32Array(children.length + 1); - // Predecessor indices + 1 - const p = new Int32Array(children.length); - m[0] = -1; - let longest = 0; - for (let i = 0; i < children.length; i++) { - const current = children[i].claim_order; - // Find the largest subsequence length such that it ends in a value less than our current value - // upper_bound returns first greater value, so we subtract one - // with fast path for when we are on the current longest subsequence - const seq_len = - (longest > 0 && children[m[longest]].claim_order <= current - ? longest + 1 - : upper_bound(1, longest, (idx) => children[m[idx]].claim_order, current)) - 1; - p[i] = m[seq_len] + 1; - const new_len = seq_len + 1; - // We can guarantee that current is the smallest value. 
Otherwise, we would have generated a longer sequence. - m[new_len] = i; - longest = Math.max(new_len, longest); - } - // The longest increasing subsequence of nodes (initially reversed) - - /** - * @type {NodeEx2[]} - */ - const lis = []; - // The rest of the nodes, nodes that will be moved - - /** - * @type {NodeEx2[]} - */ - const to_move = []; - let last = children.length - 1; - for (let cur = m[longest] + 1; cur != 0; cur = p[cur - 1]) { - lis.push(children[cur - 1]); - for (; last >= cur; last--) { - to_move.push(children[last]); - } - last--; - } - for (; last >= 0; last--) { - to_move.push(children[last]); - } - lis.reverse(); - // We sort the nodes being moved to guarantee that their insertion order matches the claim order - to_move.sort((a, b) => a.claim_order - b.claim_order); - // Finally, we move the nodes - for (let i = 0, j = 0; i < to_move.length; i++) { - while (j < lis.length && to_move[i].claim_order >= lis[j].claim_order) { - j++; - } - const anchor = j < lis.length ? lis[j] : null; - target.insertBefore(to_move[i], anchor); - } -} - -/** - * @param {Node} target - * @param {Node} node - * @returns {void} - */ -function append(target, node) { - target.appendChild(node); -} - -/** - * @param {Node} target - * @param {string} style_sheet_id - * @param {string} styles - * @returns {void} - */ -function append_styles(target, style_sheet_id, styles) { - const append_styles_to = get_root_for_style(target); - if (!append_styles_to.getElementById(style_sheet_id)) { - const style = element('style'); - style.id = style_sheet_id; - style.textContent = styles; - append_stylesheet(append_styles_to, style); - } -} - -/** - * @param {Node} node - * @returns {ShadowRoot | Document} - */ -function get_root_for_style(node) { - if (!node) return document; - const root = node.getRootNode ? node.getRootNode() : node.ownerDocument; - if (root && /** @type {ShadowRoot} */ (root).host) { - return /** @type {ShadowRoot} */ (root); - } - return node.ownerDocument; -} - -/** - * @param {Node} node - * @returns {CSSStyleSheet} - */ -function append_empty_stylesheet(node) { - const style_element = element('style'); - // For transitions to work without 'style-src: unsafe-inline' Content Security Policy, - // these empty tags need to be allowed with a hash as a workaround until we move to the Web Animations API. - // Using the hash for the empty string (for an empty tag) works in all browsers except Safari. - // So as a workaround for the workaround, when we append empty style tags we set their content to /* empty */. - // The hash 'sha256-9OlNO0DNEeaVzHL4RZwCLsBHA8WBQ8toBp/4F5XV2nc=' will then work even in Safari. 
- style_element.textContent = '/* empty */'; - append_stylesheet(get_root_for_style(node), style_element); - return style_element.sheet; -} - -/** - * @param {ShadowRoot | Document} node - * @param {HTMLStyleElement} style - * @returns {CSSStyleSheet} - */ -function append_stylesheet(node, style) { - append(/** @type {Document} */ (node).head || node, style); - return style.sheet; -} - -/** - * @param {NodeEx} target - * @param {NodeEx} node - * @returns {void} - */ -function append_hydration(target, node) { - if (is_hydrating) { - init_hydrate(target); - if ( - target.actual_end_child === undefined || - (target.actual_end_child !== null && target.actual_end_child.parentNode !== target) - ) { - target.actual_end_child = target.firstChild; - } - // Skip nodes of undefined ordering - while (target.actual_end_child !== null && target.actual_end_child.claim_order === undefined) { - target.actual_end_child = target.actual_end_child.nextSibling; - } - if (node !== target.actual_end_child) { - // We only insert if the ordering of this node should be modified or the parent node is not target - if (node.claim_order !== undefined || node.parentNode !== target) { - target.insertBefore(node, target.actual_end_child); - } - } else { - target.actual_end_child = node.nextSibling; - } - } else if (node.parentNode !== target || node.nextSibling !== null) { - target.appendChild(node); - } -} - -/** - * @param {Node} target - * @param {Node} node - * @param {Node} [anchor] - * @returns {void} - */ -function insert(target, node, anchor) { - target.insertBefore(node, anchor || null); -} - -/** - * @param {NodeEx} target - * @param {NodeEx} node - * @param {NodeEx} [anchor] - * @returns {void} - */ -function insert_hydration(target, node, anchor) { - if (is_hydrating && !anchor) { - append_hydration(target, node); - } else if (node.parentNode !== target || node.nextSibling != anchor) { - target.insertBefore(node, anchor || null); - } -} - -/** - * @param {Node} node - * @returns {void} - */ -function detach(node) { - if (node.parentNode) { - node.parentNode.removeChild(node); - } -} - -/** - * @returns {void} */ -function destroy_each(iterations, detaching) { - for (let i = 0; i < iterations.length; i += 1) { - if (iterations[i]) iterations[i].d(detaching); - } -} - -/** - * @template {keyof HTMLElementTagNameMap} K - * @param {K} name - * @returns {HTMLElementTagNameMap[K]} - */ -function element(name) { - return document.createElement(name); -} - -/** - * @template {keyof HTMLElementTagNameMap} K - * @param {K} name - * @param {string} is - * @returns {HTMLElementTagNameMap[K]} - */ -function element_is(name, is) { - return document.createElement(name, { is }); -} - -/** - * @template T - * @template {keyof T} K - * @param {T} obj - * @param {K[]} exclude - * @returns {Pick>} - */ -function object_without_properties(obj, exclude) { - const target = /** @type {Pick>} */ ({}); - for (const k in obj) { - if ( - has_prop(obj, k) && - // @ts-ignore - exclude.indexOf(k) === -1 - ) { - // @ts-ignore - target[k] = obj[k]; - } - } - return target; -} - -/** - * @template {keyof SVGElementTagNameMap} K - * @param {K} name - * @returns {SVGElement} - */ -function svg_element(name) { - return document.createElementNS('http://www.w3.org/2000/svg', name); -} - -/** - * @param {string} data - * @returns {Text} - */ -function text(data) { - return document.createTextNode(data); -} - -/** - * @returns {Text} */ -function space() { - return text(' '); -} - -/** - * @returns {Text} */ -function empty() { - return text(''); -} - 
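
The helpers defined above — `element`, `text`, `attr`, `append`, `insert`, `listen`, `detach` — are the low-level runtime primitives that compiled Svelte components call when they create, mount and tear down a DOM fragment. A minimal sketch of how they compose is shown below; it is illustrative only (not actual compiler output), and the names `create_button_block`, `target` and `handle_click` are hypothetical placeholders, not part of this file.

```js
// Illustrative sketch only: composing the runtime DOM helpers defined above.
// `target` is an existing DOM node; `handle_click` is any event handler.
function create_button_block(target, handle_click) {
  // create phase: build detached nodes
  const button = element('button');
  attr(button, 'class', 'primary');
  const label = text('Click me');
  append(button, label);

  // mount phase: attach to the document and wire up events
  insert(target, button, null);
  const dispose = listen(button, 'click', handle_click);

  // destroy phase: undo everything
  return () => {
    detach(button);
    dispose();
  };
}
```

Real compiled output typically splits these phases across separate create/mount/destroy methods on a block object rather than a single function, but the underlying helper calls are the same ones defined in this file.
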
-/** - * @param {string} content - * @returns {Comment} - */ -function comment(content) { - return document.createComment(content); -} - -/** - * @param {EventTarget} node - * @param {string} event - * @param {EventListenerOrEventListenerObject} handler - * @param {boolean | AddEventListenerOptions | EventListenerOptions} [options] - * @returns {() => void} - */ -function listen(node, event, handler, options) { - node.addEventListener(event, handler, options); - return () => node.removeEventListener(event, handler, options); -} - -/** - * @returns {(event: any) => any} */ -function prevent_default(fn) { - return function (event) { - event.preventDefault(); - // @ts-ignore - return fn.call(this, event); - }; -} - -/** - * @returns {(event: any) => any} */ -function stop_propagation(fn) { - return function (event) { - event.stopPropagation(); - // @ts-ignore - return fn.call(this, event); - }; -} - -/** - * @returns {(event: any) => any} */ -function stop_immediate_propagation(fn) { - return function (event) { - event.stopImmediatePropagation(); - // @ts-ignore - return fn.call(this, event); - }; -} - -/** - * @returns {(event: any) => void} */ -function self(fn) { - return function (event) { - // @ts-ignore - if (event.target === this) fn.call(this, event); - }; -} - -/** - * @returns {(event: any) => void} */ -function trusted(fn) { - return function (event) { - // @ts-ignore - if (event.isTrusted) fn.call(this, event); - }; -} - -/** - * @param {Element} node - * @param {string} attribute - * @param {string} [value] - * @returns {void} - */ -function attr(node, attribute, value) { - if (value == null) node.removeAttribute(attribute); - else if (node.getAttribute(attribute) !== value) node.setAttribute(attribute, value); -} -/** - * List of attributes that should always be set through the attr method, - * because updating them through the property setter doesn't work reliably. - * In the example of `width`/`height`, the problem is that the setter only - * accepts numeric values, but the attribute can also be set to a string like `50%`. - * If this list becomes too big, rethink this approach. 
- */ -const always_set_through_set_attribute = ['width', 'height']; - -/** - * @param {Element & ElementCSSInlineStyle} node - * @param {{ [x: string]: string }} attributes - * @returns {void} - */ -function set_attributes(node, attributes) { - // @ts-ignore - const descriptors = Object.getOwnPropertyDescriptors(node.__proto__); - for (const key in attributes) { - if (attributes[key] == null) { - node.removeAttribute(key); - } else if (key === 'style') { - node.style.cssText = attributes[key]; - } else if (key === '__value') { - /** @type {any} */ (node).value = node[key] = attributes[key]; - } else if ( - descriptors[key] && - descriptors[key].set && - always_set_through_set_attribute.indexOf(key) === -1 - ) { - node[key] = attributes[key]; - } else { - attr(node, key, attributes[key]); - } - } -} - -/** - * @param {Element & ElementCSSInlineStyle} node - * @param {{ [x: string]: string }} attributes - * @returns {void} - */ -function set_svg_attributes(node, attributes) { - for (const key in attributes) { - attr(node, key, attributes[key]); - } -} - -/** - * @param {Record} data_map - * @returns {void} - */ -function set_custom_element_data_map(node, data_map) { - Object.keys(data_map).forEach((key) => { - set_custom_element_data(node, key, data_map[key]); - }); -} - -/** - * @returns {void} */ -function set_custom_element_data(node, prop, value) { - const lower = prop.toLowerCase(); // for backwards compatibility with existing behavior we do lowercase first - if (lower in node) { - node[lower] = typeof node[lower] === 'boolean' && value === '' ? true : value; - } else if (prop in node) { - node[prop] = typeof node[prop] === 'boolean' && value === '' ? true : value; - } else { - attr(node, prop, value); - } -} - -/** - * @param {string} tag - */ -function set_dynamic_element_data(tag) { - return /-/.test(tag) ? 
set_custom_element_data_map : set_attributes; -} - -/** - * @returns {void} - */ -function xlink_attr(node, attribute, value) { - node.setAttributeNS('http://www.w3.org/1999/xlink', attribute, value); -} - -/** - * @param {HTMLElement} node - * @returns {string} - */ -function get_svelte_dataset(node) { - return node.dataset.svelteH; -} - -/** - * @returns {unknown[]} */ -function get_binding_group_value(group, __value, checked) { - const value = new Set(); - for (let i = 0; i < group.length; i += 1) { - if (group[i].checked) value.add(group[i].__value); - } - if (!checked) { - value.delete(__value); - } - return Array.from(value); -} - -/** - * @param {HTMLInputElement[]} group - * @returns {{ p(...inputs: HTMLInputElement[]): void; r(): void; }} - */ -function init_binding_group(group) { - /** - * @type {HTMLInputElement[]} */ - let _inputs; - return { - /* push */ p(...inputs) { - _inputs = inputs; - _inputs.forEach((input) => group.push(input)); - }, - /* remove */ r() { - _inputs.forEach((input) => group.splice(group.indexOf(input), 1)); - } - }; -} - -/** - * @param {number[]} indexes - * @returns {{ u(new_indexes: number[]): void; p(...inputs: HTMLInputElement[]): void; r: () => void; }} - */ -function init_binding_group_dynamic(group, indexes) { - /** - * @type {HTMLInputElement[]} */ - let _group = get_binding_group(group); - - /** - * @type {HTMLInputElement[]} */ - let _inputs; - - function get_binding_group(group) { - for (let i = 0; i < indexes.length; i++) { - group = group[indexes[i]] = group[indexes[i]] || []; - } - return group; - } - - /** - * @returns {void} */ - function push() { - _inputs.forEach((input) => _group.push(input)); - } - - /** - * @returns {void} */ - function remove() { - _inputs.forEach((input) => _group.splice(_group.indexOf(input), 1)); - } - return { - /* update */ u(new_indexes) { - indexes = new_indexes; - const new_group = get_binding_group(group); - if (new_group !== _group) { - remove(); - _group = new_group; - push(); - } - }, - /* push */ p(...inputs) { - _inputs = inputs; - push(); - }, - /* remove */ r: remove - }; -} - -/** @returns {number} */ -function to_number(value) { - return value === '' ? 
null : +value; -} - -/** @returns {any[]} */ -function time_ranges_to_array(ranges) { - const array = []; - for (let i = 0; i < ranges.length; i += 1) { - array.push({ start: ranges.start(i), end: ranges.end(i) }); - } - return array; -} - -/** - * @param {Element} element - * @returns {ChildNode[]} - */ -function children(element) { - return Array.from(element.childNodes); -} - -/** - * @param {ChildNodeArray} nodes - * @returns {void} - */ -function init_claim_info(nodes) { - if (nodes.claim_info === undefined) { - nodes.claim_info = { last_index: 0, total_claimed: 0 }; - } -} - -/** - * @template {ChildNodeEx} R - * @param {ChildNodeArray} nodes - * @param {(node: ChildNodeEx) => node is R} predicate - * @param {(node: ChildNodeEx) => ChildNodeEx | undefined} process_node - * @param {() => R} create_node - * @param {boolean} dont_update_last_index - * @returns {R} - */ -function claim_node(nodes, predicate, process_node, create_node, dont_update_last_index = false) { - // Try to find nodes in an order such that we lengthen the longest increasing subsequence - init_claim_info(nodes); - const result_node = (() => { - // We first try to find an element after the previous one - for (let i = nodes.claim_info.last_index; i < nodes.length; i++) { - const node = nodes[i]; - if (predicate(node)) { - const replacement = process_node(node); - if (replacement === undefined) { - nodes.splice(i, 1); - } else { - nodes[i] = replacement; - } - if (!dont_update_last_index) { - nodes.claim_info.last_index = i; - } - return node; - } - } - // Otherwise, we try to find one before - // We iterate in reverse so that we don't go too far back - for (let i = nodes.claim_info.last_index - 1; i >= 0; i--) { - const node = nodes[i]; - if (predicate(node)) { - const replacement = process_node(node); - if (replacement === undefined) { - nodes.splice(i, 1); - } else { - nodes[i] = replacement; - } - if (!dont_update_last_index) { - nodes.claim_info.last_index = i; - } else if (replacement === undefined) { - // Since we spliced before the last_index, we decrease it - nodes.claim_info.last_index--; - } - return node; - } - } - // If we can't find any matching node, we create a new one - return create_node(); - })(); - result_node.claim_order = nodes.claim_info.total_claimed; - nodes.claim_info.total_claimed += 1; - return result_node; -} - -/** - * @param {ChildNodeArray} nodes - * @param {string} name - * @param {{ [key: string]: boolean }} attributes - * @param {(name: string) => Element | SVGElement} create_element - * @returns {Element | SVGElement} - */ -function claim_element_base(nodes, name, attributes, create_element) { - return claim_node( - nodes, - /** @returns {node is Element | SVGElement} */ - (node) => node.nodeName === name, - /** @param {Element} node */ - (node) => { - const remove = []; - for (let j = 0; j < node.attributes.length; j++) { - const attribute = node.attributes[j]; - if (!attributes[attribute.name]) { - remove.push(attribute.name); - } - } - remove.forEach((v) => node.removeAttribute(v)); - return undefined; - }, - () => create_element(name) - ); -} - -/** - * @param {ChildNodeArray} nodes - * @param {string} name - * @param {{ [key: string]: boolean }} attributes - * @returns {Element | SVGElement} - */ -function claim_element(nodes, name, attributes) { - return claim_element_base(nodes, name, attributes, element); -} - -/** - * @param {ChildNodeArray} nodes - * @param {string} name - * @param {{ [key: string]: boolean }} attributes - * @returns {Element | SVGElement} - */ -function 
claim_svg_element(nodes, name, attributes) { - return claim_element_base(nodes, name, attributes, svg_element); -} - -/** - * @param {ChildNodeArray} nodes - * @returns {Text} - */ -function claim_text(nodes, data) { - return claim_node( - nodes, - /** @returns {node is Text} */ - (node) => node.nodeType === 3, - /** @param {Text} node */ - (node) => { - const data_str = '' + data; - if (node.data.startsWith(data_str)) { - if (node.data.length !== data_str.length) { - return node.splitText(data_str.length); - } - } else { - node.data = data_str; - } - }, - () => text(data), - true // Text nodes should not update last index since it is likely not worth it to eliminate an increasing subsequence of actual elements - ); -} - -/** - * @returns {Text} */ -function claim_space(nodes) { - return claim_text(nodes, ' '); -} - -/** - * @param {ChildNodeArray} nodes - * @returns {Comment} - */ -function claim_comment(nodes, data) { - return claim_node( - nodes, - /** @returns {node is Comment} */ - (node) => node.nodeType === 8, - /** @param {Comment} node */ - (node) => { - node.data = '' + data; - return undefined; - }, - () => comment(data), - true - ); -} - -function get_comment_idx(nodes, text, start) { - for (let i = start; i < nodes.length; i += 1) { - const node = nodes[i]; - if (node.nodeType === 8 /* comment node */ && node.textContent.trim() === text) { - return i; - } - } - return -1; -} - -/** - * @param {boolean} is_svg - * @returns {HtmlTagHydration} - */ -function claim_html_tag(nodes, is_svg) { - // find html opening tag - const start_index = get_comment_idx(nodes, 'HTML_TAG_START', 0); - const end_index = get_comment_idx(nodes, 'HTML_TAG_END', start_index + 1); - if (start_index === -1 || end_index === -1) { - return new HtmlTagHydration(is_svg); - } - - init_claim_info(nodes); - const html_tag_nodes = nodes.splice(start_index, end_index - start_index + 1); - detach(html_tag_nodes[0]); - detach(html_tag_nodes[html_tag_nodes.length - 1]); - const claimed_nodes = html_tag_nodes.slice(1, html_tag_nodes.length - 1); - for (const n of claimed_nodes) { - n.claim_order = nodes.claim_info.total_claimed; - nodes.claim_info.total_claimed += 1; - } - return new HtmlTagHydration(is_svg, claimed_nodes); -} - -/** - * @param {Text} text - * @param {unknown} data - * @returns {void} - */ -function set_data(text, data) { - data = '' + data; - if (text.data === data) return; - text.data = /** @type {string} */ (data); -} - -/** - * @param {Text} text - * @param {unknown} data - * @returns {void} - */ -function set_data_contenteditable(text, data) { - data = '' + data; - if (text.wholeText === data) return; - text.data = /** @type {string} */ (data); -} - -/** - * @param {Text} text - * @param {unknown} data - * @param {string} attr_value - * @returns {void} - */ -function set_data_maybe_contenteditable(text, data, attr_value) { - if (~contenteditable_truthy_values.indexOf(attr_value)) { - set_data_contenteditable(text, data); - } else { - set_data(text, data); - } -} - -/** - * @returns {void} */ -function set_input_value(input, value) { - input.value = value == null ? '' : value; -} - -/** - * @returns {void} */ -function set_input_type(input, type) { - try { - input.type = type; - } catch (e) { - // do nothing - } -} - -/** - * @returns {void} */ -function set_style(node, key, value, important) { - if (value == null) { - node.style.removeProperty(key); - } else { - node.style.setProperty(key, value, important ? 
'important' : ''); - } -} - -/** - * @returns {void} */ -function select_option(select, value, mounting) { - for (let i = 0; i < select.options.length; i += 1) { - const option = select.options[i]; - if (option.__value === value) { - option.selected = true; - return; - } - } - if (!mounting || value !== undefined) { - select.selectedIndex = -1; // no option should be selected - } -} - -/** - * @returns {void} */ -function select_options(select, value) { - for (let i = 0; i < select.options.length; i += 1) { - const option = select.options[i]; - option.selected = ~value.indexOf(option.__value); - } -} - -function select_value(select) { - const selected_option = select.querySelector(':checked'); - return selected_option && selected_option.__value; -} - -function select_multiple_value(select) { - return [].map.call(select.querySelectorAll(':checked'), (option) => option.__value); -} -// unfortunately this can't be a constant as that wouldn't be tree-shakeable -// so we cache the result instead - -/** - * @type {boolean} */ -let crossorigin; - -/** - * @returns {boolean} */ -function is_crossorigin() { - if (crossorigin === undefined) { - crossorigin = false; - try { - if (typeof window !== 'undefined' && window.parent) { - void window.parent.document; - } - } catch (error) { - crossorigin = true; - } - } - return crossorigin; -} - -/** - * @param {HTMLElement} node - * @param {() => void} fn - * @returns {() => void} - */ -function add_iframe_resize_listener(node, fn) { - const computed_style = getComputedStyle(node); - if (computed_style.position === 'static') { - node.style.position = 'relative'; - } - const iframe = element('iframe'); - iframe.setAttribute( - 'style', - 'display: block; position: absolute; top: 0; left: 0; width: 100%; height: 100%; ' + - 'overflow: hidden; border: 0; opacity: 0; pointer-events: none; z-index: -1;' - ); - iframe.setAttribute('aria-hidden', 'true'); - iframe.tabIndex = -1; - const crossorigin = is_crossorigin(); - - /** - * @type {() => void} - */ - let unsubscribe; - if (crossorigin) { - iframe.src = "data:text/html,<script>onresize=function(){parent.postMessage(0, '*')}</script>"; - unsubscribe = listen( - window, - 'message', - /** @param {MessageEvent} event */ (event) => { - if (event.source === iframe.contentWindow) fn(); - } - ); - } else { - iframe.src = 'about:blank'; - iframe.onload = () => { - unsubscribe = listen(iframe.contentWindow, 'resize', fn); - // make sure an initial resize event is fired _after_ the iframe is loaded (which is asynchronous) - // see https://github.com/sveltejs/svelte/issues/4233 - fn(); - }; - } - append(node, iframe); - return () => { - if (crossorigin) { - unsubscribe(); - } else if (unsubscribe && iframe.contentWindow) { - unsubscribe(); - } - detach(iframe); - }; -} -const resize_observer_content_box = /* @__PURE__ */ new ResizeObserverSingleton({ - box: 'content-box' -}); -const resize_observer_border_box = /* @__PURE__ */ new ResizeObserverSingleton({ - box: 'border-box' -}); -const resize_observer_device_pixel_content_box = /* @__PURE__ */ new ResizeObserverSingleton( - { box: 'device-pixel-content-box' } -); - -/** - * @returns {void} */ -function toggle_class(element, name, toggle) { - // The `!!` is required because an `undefined` flag means flipping the current state. 
- element.classList.toggle(name, !!toggle); -} - -/** - * @template T - * @param {string} type - * @param {T} [detail] - * @param {{ bubbles?: boolean, cancelable?: boolean }} [options] - * @returns {CustomEvent} - */ -function custom_event(type, detail, { bubbles = false, cancelable = false } = {}) { - return new CustomEvent(type, { detail, bubbles, cancelable }); -} - -/** - * @param {string} selector - * @param {HTMLElement} parent - * @returns {ChildNodeArray} - */ -function query_selector_all(selector, parent = document.body) { - return Array.from(parent.querySelectorAll(selector)); -} - -/** - * @param {string} nodeId - * @param {HTMLElement} head - * @returns {any[]} - */ -function head_selector(nodeId, head) { - const result = []; - let started = 0; - for (const node of head.childNodes) { - if (node.nodeType === 8 /* comment node */) { - const comment = node.textContent.trim(); - if (comment === `HEAD_${nodeId}_END`) { - started -= 1; - result.push(node); - } else if (comment === `HEAD_${nodeId}_START`) { - started += 1; - result.push(node); - } - } else if (started > 0) { - result.push(node); - } - } - return result; -} -/** */ -class HtmlTag { - /** - * @private - * @default false - */ - is_svg = false; - /** parent for creating node */ - e = undefined; - /** html tag nodes */ - n = undefined; - /** target */ - t = undefined; - /** anchor */ - a = undefined; - constructor(is_svg = false) { - this.is_svg = is_svg; - this.e = this.n = null; - } - - /** - * @param {string} html - * @returns {void} - */ - c(html) { - this.h(html); - } - - /** - * @param {string} html - * @param {HTMLElement | SVGElement} target - * @param {HTMLElement | SVGElement} anchor - * @returns {void} - */ - m(html, target, anchor = null) { - if (!this.e) { - if (this.is_svg) - this.e = svg_element(/** @type {keyof SVGElementTagNameMap} */ (target.nodeName)); - /** #7364 target for