diff --git a/spaces/0x90e/ESRGAN-MANGA/inference.py b/spaces/0x90e/ESRGAN-MANGA/inference.py
deleted file mode 100644
index 3c104422675598d988b802fa275be84a52f96472..0000000000000000000000000000000000000000
--- a/spaces/0x90e/ESRGAN-MANGA/inference.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import sys
-import cv2
-import numpy as np
-import torch
-import ESRGAN.architecture as esrgan
-import ESRGAN_plus.architecture as esrgan_plus
-from run_cmd import run_cmd
-from ESRGANer import ESRGANer
-
-def is_cuda():
-    if torch.cuda.is_available():
-        return True
-    else:
-        return False
-
-model_type = sys.argv[2]
-
-if model_type == "Anime":
-    model_path = "models/4x-AnimeSharp.pth"
-elif model_type == "Photo":
-    model_path = "models/4x_Valar_v1.pth"
-else:
-    model_path = "models/4x_NMKD-Siax_200k.pth"
-
-OUTPUT_PATH = sys.argv[1]
-device = torch.device('cuda' if is_cuda() else 'cpu')
-
-if model_type != "Photo":
-    model = esrgan.RRDB_Net(3, 3, 64, 23, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv')
-else:
-    model = esrgan_plus.RRDB_Net(3, 3, 64, 23, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv')
-
-if is_cuda():
-    print("Using GPU 🥶")
-    model.load_state_dict(torch.load(model_path), strict=True)
-else:
-    print("Using CPU 😒")
-    model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')), strict=True)
-
-model.eval()
-
-for k, v in model.named_parameters():
-    v.requires_grad = False
-model = model.to(device)
-
-# Read image
-img = cv2.imread(OUTPUT_PATH, cv2.IMREAD_COLOR)
-img = img * 1.0 / 255
-img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
-img_LR = img.unsqueeze(0)
-img_LR = img_LR.to(device)
-
-upsampler = ESRGANer(model=model)
-output = upsampler.enhance(img_LR)
-
-output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
-output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))
-output = (output * 255.0).round()
-cv2.imwrite(OUTPUT_PATH, output, [int(cv2.IMWRITE_PNG_COMPRESSION), 5])
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/8x8 Work for Windows The Ultimate Communication and Collaboration Platform for PC.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/8x8 Work for Windows The Ultimate Communication and Collaboration Platform for PC.md
deleted file mode 100644
index 470169e4b1d68dfbf169ae7cb6e4a8bbde6eb744..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/8x8 Work for Windows The Ultimate Communication and Collaboration Platform for PC.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# How to Download and Install 8x8 Work for Windows
-
-8x8 Work is a cloud-based communication and collaboration platform that allows you to make voice and video calls, send messages, share files and more. It is designed to help you work smarter and faster from anywhere. If you want to use 8x8 Work on your Windows PC, you need to download and install the 8x8 Work for Windows app, which is available as an MSI file. In this article, we will show you how to do it in a few simple steps.
-
-## Step 1: Download the 8x8 Work for Windows MSI File
-
-The first thing you need to do is to download the 8x8 Work for Windows MSI file from the official website. To do this, go to the link below and click on the "Download" button.
-
-[Download 8x8 Work for Windows Here](#1)
-
-This will start the download of the MSI file, which is about 100 MB in size. Save the file to your preferred location on your PC.
-
-## Step 2: Run the 8x8 Work for Windows MSI File
-
-Once the download is complete, you need to run the 8x8 Work for Windows MSI file to start the installation process. To do this, locate the file on your PC and double-click on it. This will launch the installer, which will guide you through the installation process. Follow the instructions on the screen and accept the terms and conditions. You can also choose the destination folder where you want to install the app. The default location is "C:\Program Files (x86)\8x8\Work", but you can change it if you want.
-
-## Step 3: Launch the 8x8 Work for Windows App
-
-After the installation is finished, you can launch the 8x8 Work for Windows app from the desktop shortcut or from the start menu. You will be asked to sign in with your 8x8 username and password. If you don't have an account yet, you can create one from the app or from the website. Once you sign in, you can access all the features and tools of 8x8 Work, such as making calls, sending messages, joining meetings, sharing files and more.
-
-## Step 4: Enjoy 8x8 Work for Windows
-
-You are done! You have successfully downloaded and installed 8x8 Work for Windows on your PC. You can now use it to communicate and collaborate with your team members, clients and partners from anywhere. You can also adjust the app settings, such as notifications, audio and video devices, language and updates, from the options menu in the app. Have fun!
-
-## 8x8 Work for Windows Features
-
-8x8 Work for Windows has many features that make it a powerful and versatile communication and collaboration platform. Some of these features are:
-
-- Voice and Video Calls: You can make high-quality voice and video calls to anyone in your 8x8 contact list or to any phone number. You can also transfer, hold, mute and record calls, as well as use call waiting and caller ID. You can also join or host conference calls with up to 100 participants.
-- Messaging: You can send and receive instant messages to anyone in your 8x8 contact list or to any phone number. You can also create group chats, send emojis and stickers, share files and images, and delete or edit messages. You can also sync your messages across all your devices.
-- Meetings: You can join or host online meetings with up to 100 participants. You can also share your screen, use a virtual background, chat with other participants, and record and save meetings. You can also schedule meetings from the app or from your calendar app.
-- Files: You can share and access files from your 8x8 cloud storage or from other cloud services, such as Google Drive, Dropbox and OneDrive. You can also preview, download and delete files, as well as search for files by name or type.
-- Contacts: You can manage your 8x8 contact list or import contacts from other sources, such as Outlook, Gmail and LinkedIn. You can also search for contacts by name, number or email, as well as add, edit or delete contacts. You can also view your contact's availability status and presence information.
-
-## 8x8 Work for Windows Benefits
-
-8x8 Work for Windows has many benefits that make it a valuable and convenient communication and collaboration platform. Some of these benefits are:
-
-- Easy to Use: 8x8 Work for Windows has a simple and intuitive user interface that makes it easy to use and navigate. You can access all the features and tools from the main menu or from the toolbar. You can also customize the app according to your preferences and needs.
-- Secure and Reliable: 8x8 Work for Windows uses encryption and authentication to ensure the security and privacy of your data and communications. It also has a robust cloud infrastructure that ensures the reliability and availability of the service. You can also use the app offline or in low-bandwidth situations.
-- Flexible and Scalable: 8x8 Work for Windows adapts to your business needs and goals. You can choose from different plans and features that suit your budget and requirements. You can also add or remove users, devices and extensions as you grow or change.
-- Compatible and Integrable: 8x8 Work for Windows works seamlessly with other 8x8 products and services, such as 8x8 Contact Center, 8x8 Analytics and 8x8 Voice for Microsoft Teams. It also integrates with other popular apps and platforms, such as Outlook, Gmail, Salesforce, Zendesk and Slack.
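
One practical addition to Step 2: if you manage many PCs, an MSI can also be installed silently from the command line rather than by double-clicking. Below is a minimal sketch using Python's standard library; the installer filename is an assumption (use the file you actually downloaded), and the script must be run from an elevated (administrator) prompt:

```python
import subprocess

# Quiet, no-UI install of the downloaded MSI via Windows Installer.
# "8x8-work.msi" is a placeholder for the file you saved in Step 1.
result = subprocess.run(
    ["msiexec", "/i", "8x8-work.msi", "/qn", "/norestart"],
    check=False,
)
# msiexec exit codes: 0 = success, 3010 = success but a reboot is required.
print("msiexec exit code:", result.returncode)
```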

-

-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (tamil dubbed 1080p movies Housefull) - Enjoy the funniest Bollywood film in Tamil language.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (tamil dubbed 1080p movies Housefull) - Enjoy the funniest Bollywood film in Tamil language.md
deleted file mode 100644
index 0812409b43a573c935faa81102229122c2557600..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (tamil dubbed 1080p movies Housefull) - Enjoy the funniest Bollywood film in Tamil language.md
+++ /dev/null
@@ -1,206 +0,0 @@
-<br />
-

HD Online Player (tamil dubbed 1080p movies Housefull)

-

If you are a fan of comedy movies and want to watch them in high definition, you might be interested in HD Online Player. This is a free online video player that lets you stream and download tamil dubbed 1080p movies, including the hilarious Housefull series. In this article, we will tell you more about HD Online Player, tamil dubbed 1080p movies, and how to watch Housefull in HD online.

-

-

What is HD Online Player?

-

HD Online Player is a free online video player that supports HTML5 and MP4 formats. It allows you to watch videos online without downloading them or installing any software. You can just copy and paste the video URL into the player and enjoy high-quality streaming and ad-free viewing.

-

Features of HD Online Player

-

Some of the features of HD Online Player are:

- -

Benefits of HD Online Player

-

Some of the benefits of using HD Online Player are:

- -

What are tamil dubbed 1080p movies?

-

Tamil dubbed 1080p movies are movies that have been dubbed in the Tamil language and have a vertical display resolution of approximately 1080 pixels. Tamil is one of the official languages of India and Sri Lanka, and is spoken by millions of people around the world. Tamil dubbed 1080p movies are popular among Tamil speakers who want to enjoy movies from other languages and cultures in their own language.<br />

-

Definition and examples of tamil dubbed 1080p movies

-

A tamil dubbed 1080p movie is a movie that has been dubbed in the Tamil language and rendered at a vertical display resolution of approximately 1080 pixels. Dubbing is the process of replacing the original dialogue of a movie with dialogue in a different language. A resolution of 1080 pixels (typically 1920x1080) is considered high definition (HD).<br />

-


-

Some examples of tamil dubbed 1080p movies are:

| Title | Original Language | Genre | Synopsis |
| --- | --- | --- | --- |
| Housefull | Hindi | Comedy | A man who believes he is cursed with bad luck tries to find true love with the help of his best friend. |
| The Avengers | English | Action/Sci-Fi | A team of superheroes must stop an alien invasion led by a rogue god. |
| Baahubali | Telugu | Epic/Fantasy | A young man learns about his royal heritage and sets out to reclaim his throne from an evil tyrant. |
| The Lion King | English | Animation/Musical | A lion cub runs away from his kingdom after his father's death and returns as an adult to challenge his uncle. |
| Pirates of the Caribbean | English | Adventure/Fantasy | A pirate captain and a blacksmith join forces to rescue a governor's daughter from a cursed crew of undead pirates. |
-

Popular genres and titles of tamil dubbed 1080p movies

-

Tamil dubbed 1080p movies cover a wide range of genres and themes, from comedy to action, from romance to horror, from drama to fantasy, and more. Some of the popular genres and titles of tamil dubbed 1080p movies are:

- -

Advantages and disadvantages of tamil dubbed 1080p movies

-

Tamil dubbed 1080p movies have their own advantages and disadvantages. Some of them are:

| Advantages | Disadvantages |
| --- | --- |
| You can enjoy movies from other languages and cultures in your own language. | You may miss out on the original voice acting and expressions of the actors. |
| You can watch movies in high quality and resolution without compromising on speed or performance. | You may encounter poor dubbing quality or synchronization issues in some cases. |
| You can access a large collection of movies from various genres and themes online for free or at low cost. | You may face legal or ethical issues if you watch pirated or unauthorized copies of movies online. |
-

How to watch Housefull in HD online?

-

If you want to watch Housefull in HD online using HD Online Player, you need to follow these steps:

-

Introduction and synopsis of Housefull

-

Housefull is a 2010 Indian Hindi-language comedy film directed by Sajid Khan and starring Akshay Kumar, Riteish Deshmukh, Arjun Rampal, Deepika Padukone, Lara Dutta, and Jiah Khan. It is the first installment in the Housefull film series. The film follows Aarush (Kumar), a man who believes he is cursed with bad luck and tries to find true love with the help of his best friend Bob (Deshmukh). However, his attempts lead to hilarious complications and misunderstandings involving three women: Sandy (Padukone), Devika (Khan), and Hetal (Dutta). Meanwhile, Bob's brother-in-law Major Krishna Rao (Rampal) suspects that Aarush and Bob are having affairs with his wife Pooja (Malaika Arora Khan) and sister Hetal.<br />

-

Housefull is a fun-filled comedy that will make you laugh out loud with its witty dialogues, hilarious situations, and amazing performances. It is a perfect movie to watch with your friends or family.

-

Steps to watch Housefull in HD online using HD Online Player

-

To watch Housefull in HD online using HD Online Player, you need to follow these steps:

-
    -
1. Go to the website of HD Online Player and click on the "Video Player" option.<br />
2. Copy and paste the URL of the video source of Housefull in the player. You can find the URL from various online platforms that offer tamil dubbed 1080p movies, such as TamilRockers, Moviesda, Isaimini, etc. However, be careful of the legal and ethical issues involved in watching pirated or unauthorized copies of movies online.<br />
3. Click on the "Play" button and enjoy watching Housefull in HD online. You can also customize your video player with your colors, logo, thumbnail, playbar, speed controls, chaptering, and more.<br />
4. You can also share your video link with your friends or colleagues by clicking on the "Share" button. You can also embed your video on your website, blog, or social media platforms.<br />

Tips and tricks to enhance your viewing experience

-

Here are some tips and tricks to enhance your viewing experience while watching Housefull in HD online using HD Online Player:

- -

Conclusion

-

In conclusion, HD Online Player is a free online video player that lets you watch videos online without downloading them or installing any software. It supports HTML5 and MP4 formats and offers high-quality streaming and ad-free viewing. You can also customize your video player with your colors, logo, thumbnail, playbar, speed controls, chaptering, and more. You can also share your videos with your friends or colleagues easily by sending them the link to your video. You can also embed your videos on your website, blog, or social media platforms.

-

Tamil dubbed 1080p movies are movies that have been dubbed in the Tamil language and have a vertical display resolution of approximately 1080 pixels. They are popular among Tamil speakers who want to enjoy movies from other languages and cultures in their own language. They cover a wide range of genres and themes, from comedy to action, from romance to horror, from drama to fantasy, and more. Some of the popular genres and titles of tamil dubbed 1080p movies are comedy (Housefull series), action (The Avengers series), romance (Titanic), horror (The Conjuring series), drama (The Godfather series), fantasy (Harry Potter series), etc.<br />

-

Housefull is a 2010 Indian Hindi-language comedy film directed by Sajid Khan and starring Akshay Kumar, Riteish Deshmukh, Arjun Rampal, Deepika Padukone, Lara Dutta, and Jiah Khan. It is the first installment in the Housefull film series. The film follows Aarush (Kumar), a man who believes he is cursed with bad luck and tries to find true love with the help of his best friend Bob (Deshmukh). However, his attempts lead to hilarious complications and misunderstandings involving three women: Sandy (Padukone), Devika (Khan), and Hetal (Dutta). Meanwhile, Bob's brother-in-law Major Krishna Rao (Rampal) suspects that Aarush and Bob are having affairs with his wife Pooja (Malaika Arora Khan) and sister Hetal.<br />

-

Housefull is a fun-filled comedy that will make you laugh out loud with its witty dialogues, hilarious situations, and amazing performances. It is a perfect movie to watch with your friends or family. You can watch Housefull in HD online using HD Online Player by following the steps mentioned above. You can also use the tips and tricks to enhance your viewing experience.

-

We hope you enjoyed this article and learned something new. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy watching!

-

FAQs

-

Here are some frequently asked questions about HD Online Player, tamil dubbed 1080p movies, and Housefull:

-
    -
1. What are the advantages of using HD Online Player over other online video players?<br />

   Some of the advantages of using HD Online Player over other online video players are:<br />

   - -

2. What are the disadvantages of watching tamil dubbed 1080p movies online?<br />

   Some of the disadvantages of watching tamil dubbed 1080p movies online are:<br />

   - -

3. What are some of the popular genres and titles of tamil dubbed 1080p movies?<br />

   Some of the popular genres and titles of tamil dubbed 1080p movies are:<br />

   - -

4. What is the plot of Housefull?<br />

   The plot of Housefull is: Aarush (Kumar) believes he is cursed with bad luck and tries to find true love with the help of his best friend Bob (Deshmukh). However, his attempts lead to hilarious complications and misunderstandings involving three women: Sandy (Padukone), Devika (Khan), and Hetal (Dutta). Meanwhile, Bob's brother-in-law Major Krishna Rao (Rampal) suspects that Aarush and Bob are having affairs with his wife Pooja (Malaika Arora Khan) and sister Hetal.<br />

5. How can I watch Housefull in HD online using HD Online Player?<br />

   To watch Housefull in HD online using HD Online Player, you need to follow these steps:<br />

   1. Go to the website of HD Online Player and click on the "Video Player" option.<br />
   2. Copy and paste the URL of the video source of Housefull in the player. You can find the URL from various online platforms that offer tamil dubbed 1080p movies, such as TamilRockers, Moviesda, Isaimini, etc. However, be careful of the legal and ethical issues involved in watching pirated or unauthorized copies of movies online.<br />
   3. Click on the "Play" button and enjoy watching Housefull in HD online. You can also customize your video player with your colors, logo, thumbnail, playbar, speed controls, chaptering, and more.<br />
   4. You can also share your video link with your friends or colleagues by clicking on the "Share" button. You can also embed your video on your website, blog, or social media platforms.<br />
-

-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/American Marksman MOD APK The ultimate simulation game with unlimited money gold wood metal and more.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/American Marksman MOD APK The ultimate simulation game with unlimited money gold wood metal and more.md
deleted file mode 100644
index e4c7755ad061543071ced2ca2944342ec22e9068..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/American Marksman MOD APK The ultimate simulation game with unlimited money gold wood metal and more.md
+++ /dev/null
@@ -1,108 +0,0 @@
-<br />
-

American Marksman MOD APK: A Shooting Game with Unlimited Money

-

Introduction

-

Do you love shooting games? Do you want to test your skills as a marksman and complete challenging missions? If yes, then you should try American Marksman, a simulation game that lets you experience the thrill of being a sniper. But wait, there's more! You can also enjoy the game with unlimited money, gold, wood, and metal by downloading the American Marksman MOD APK. In this article, we will tell you everything you need to know about this amazing mod, including its features, how to download and install it, and some FAQs.

-

-

What is American Marksman?

-

American Marksman is a simulation game developed by Game Pickle. It is available for Android devices and has more than 1 million downloads on Google Play Store. The game puts you in the role of a sniper who has to complete various missions, such as assassinating targets, protecting allies, or destroying enemy bases. You can choose from a wide range of weapons, such as rifles, pistols, shotguns, or grenades. You can also upgrade your weapons and equipment to improve your performance and accuracy. The game has realistic graphics and sound effects that make you feel like you are in a real battlefield.

-

Why download American Marksman MOD APK?

-

While American Marksman is a fun and addictive game, it also has some drawbacks. For example, you need to spend real money to buy more gold, wood, or metal, which are essential resources for upgrading your weapons and equipment. You also have to watch ads to get extra rewards or bonuses. These can be annoying and frustrating for some players who just want to enjoy the game without any interruptions or limitations. That's why downloading the American Marksman MOD APK is a great idea. This mod gives you unlimited money, gold, wood, and metal, so you can buy anything you want without spending a dime. It also removes all the ads from the game, so you can play without any distractions or delays.

-

Features of American Marksman MOD APK

-

Unlimited money, gold, wood, and metal

-

The most obvious feature of the American Marksman MOD APK is that it gives you unlimited money, gold, wood, and metal. These are the main currencies in the game that you need to upgrade your weapons and equipment. With unlimited resources, you can buy any weapon or item you want without worrying about running out of money. You can also upgrade your weapons and equipment to the maximum level and enjoy their full potential. This will make your missions easier and more fun.

-

No ads

-

Another feature of the American Marksman MOD APK is that it removes all the ads from the game. Ads are usually displayed after completing a mission or when you want to get extra rewards or bonuses. They can be annoying and distracting for some players who just want to play the game without any interruptions or delays. By downloading the modded version of the game, you can get rid of all the ads and enjoy a smooth and uninterrupted gaming experience.

-

Realistic graphics and sound effects

-

The American Marksman MOD APK also preserves the original quality of the game's graphics and sound effects. The game has realistic 3D graphics that create an immersive atmosphere for the players. The game also has realistic sound effects that enhance the gameplay and make you feel like you are on a real battlefield. You can hear the sound of gunshots, explosions, wind, or birds as you play the game. The modded version of the game does not compromise on these aspects and delivers a high-quality gaming experience.<br />

Various weapons and missions

-

The American Marksman MOD APK also offers a variety of weapons and missions for the players. You can choose from different types of weapons, such as rifles, pistols, shotguns, or grenades. Each weapon has its own advantages and disadvantages, so you need to choose wisely depending on the mission and the target. You can also customize your weapons with different scopes, silencers, or magazines. The game has more than 100 missions that test your skills as a marksman. You have to complete different objectives, such as assassinating targets, protecting allies, or destroying enemy bases. The missions are challenging and diverse, so you will never get bored of playing the game.

-


-

Easy controls and gameplay

-

The American Marksman MOD APK also has easy controls and gameplay that make the game suitable for anyone. The game has a simple user interface that shows you all the information you need, such as your health, ammo, or mission details, and a tutorial that guides you through the basics. The controls are easy: you can aim, shoot, zoom, or reload with just a few taps on the screen, and an auto-fire option lets you shoot automatically when you aim at a target. The gameplay is smooth and fast, so you can enjoy the game without any lags or glitches.<br />

-

How to download and install American Marksman MOD APK?

-

If you are interested in downloading and installing the American Marksman MOD APK, you can follow these simple steps:

-

Step 1: Download the APK file from a trusted source

-

The first step is to download the APK file of the American Marksman MOD APK from a trusted source. You can use the link below to download the file directly to your device. The file size is about 100 MB, so make sure you have enough space on your device before downloading it.

-

Download American Marksman MOD APK

-

Step 2: Enable unknown sources on your device

-

The next step is to enable unknown sources on your device. This is necessary because the APK file is not from the official Google Play Store, so you need to allow your device to install apps from other sources. To do this, go to your device settings and look for the security or privacy option. Then, find the unknown sources option and enable it. This will allow you to install the APK file without any problems.

-

Step 3: Install the APK file and launch the game

-

The final step is to install the APK file and launch the game. To do this, locate the downloaded APK file on your device and tap on it. Then, follow the instructions on the screen to install the app. Once the installation is done, you can launch the game and enjoy it with unlimited money, gold, wood, and metal.
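
If tapping the file does not start the installer, or you prefer to manage the device from a computer, the same APK can be sideloaded with Android's standard adb tool. A minimal sketch, assuming adb is on your PATH, USB debugging is enabled on the device, and the file path is illustrative:

```python
import subprocess

# Sideload the APK over USB using the Android Debug Bridge.
# "american-marksman-mod.apk" is a placeholder for the file you downloaded.
result = subprocess.run(
    ["adb", "install", "-r", "american-marksman-mod.apk"],  # -r replaces any existing install
    capture_output=True,
    text=True,
)
# adb prints "Success" on a completed install.
print(result.stdout or result.stderr)
```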

-

Conclusion

-

American Marksman is a simulation game that lets you experience the thrill of being a sniper. You can complete various missions, such as assassinating targets, protecting allies, or destroying enemy bases. You can also choose from a wide range of weapons, such as rifles, pistols, shotguns, or grenades. You can also upgrade your weapons and equipment to improve your performance and accuracy. The game has realistic graphics and sound effects that make you feel like you are in a real battlefield.

-

If you want to enjoy the game with unlimited money, gold, wood, and metal, you should download the American Marksman MOD APK. This mod gives you unlimited resources that let you buy anything you want without spending a dime. It also removes all the ads from the game, so you can play without any distractions or delays.

-

To download and install the American Marksman MOD APK, you just need to follow these simple steps:

- -

That's it! You can now enjoy the game with unlimited money, gold, wood, and metal.

-

FAQs

-

Here are some frequently asked questions about the American Marksman MOD APK:

-
    -
1. Is American Marksman MOD APK safe to download and install?<br />

   Yes, American Marksman MOD APK is safe to download and install. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from a trusted source and scan it with an antivirus before installing it.<br />

2. Do I need to root my device to use American Marksman MOD APK?<br />

   No, you do not need to root your device to use American Marksman MOD APK. It works on both rooted and non-rooted devices. However, some features may require root access to work properly.<br />

3. What are the minimum requirements to play American Marksman MOD APK?<br />

   The minimum requirements to play American Marksman MOD APK are:<br />

   - -

4. Can I play American Marksman MOD APK online with other players?<br />

   No, American Marksman MOD APK is not an online game. It is a single-player game that does not require an internet connection to play. You can play it offline anytime and anywhere you want.<br />

5. Can I update American Marksman MOD APK to the latest version?<br />

   Yes, you can update American Marksman MOD APK to the latest version. However, you need to download and install the new version manually from the same source you downloaded the previous version. You cannot update it from the Google Play Store or any other app store.<br />

-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Den Kelime Oyunu APK - cretsiz nternetsiz ve Yeni Tarz Kelime Oyunu.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Den Kelime Oyunu APK - cretsiz nternetsiz ve Yeni Tarz Kelime Oyunu.md
deleted file mode 100644
index 6343278841bb9799c21a0bea0eacd678e589f8bc..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Den Kelime Oyunu APK - cretsiz nternetsiz ve Yeni Tarz Kelime Oyunu.md
+++ /dev/null
@@ -1,132 +0,0 @@
-<br />

Düşen Kelime Oyunu APK: A Fun and Relaxing Word Puzzle Game

-

Do you love word games? Do you want to improve your Turkish vocabulary while having fun? Do you need a break from your busy life? If you answered yes to any of these questions, then you should try düşen kelime oyunu apk.

-

Düşen kelime oyunu apk is a new style of Turkish word puzzle game that has gained over 3 million downloads in a short time. It is a free and offline game that lets you find hidden words and clear the letter boxes. As you play, you will earn coins, unlock new levels, learn new words, and enjoy the sound and visual effects.

-




-

In this article, we will show you how to play düşen kelime oyunu apk, why you should play it, and what are the benefits of playing it. By the end of this article, you will be ready to download this amazing game and start your word adventure.

-

How to Play Düşen Kelime Oyunu APK

-

Playing düşen kelime oyunu apk is easy and fun. All you need is a smartphone or tablet with Android operating system. Here are the steps to follow:

-

-

Rules and Features

- -

Download and Install

- -

Here are some screenshots of the game:

- Screenshot 1 -Screenshot 2 -Screenshot 3 -

Hints and Bonuses

- -

Why You Should Play Düşen Kelime Oyunu APK

-

Düşen kelime oyunu apk is not only a fun and relaxing game, but also a beneficial one. Playing this game can help you improve your brain health, mood, language skills, and general knowledge. Here are some of the reasons why you should play this game:

-

Brain Exercise

- -

Relaxation

- -

Education

- -

Entertainment

- -

Conclusion

-

Düşen kelime oyunu apk is a fun and relaxing word puzzle game that can benefit your brain health, mood, language skills, and general knowledge. It is easy and fun to play, and it offers you hundreds of levels with different themes and categories. You can also earn coins, hints, bonuses, titles, and achievements as you play. You can also compete with other players online and share your progress with friends. You can download this game for free from APKCombo or Google Play Store and start your word adventure today.

-

If you are looking for a new and exciting way to improve your Turkish vocabulary while having fun, then düşen kelime oyunu apk is the game for you. Download it now and see for yourself why millions of people love this game.

-

Are you ready to play düşen kelime oyunu apk? Here are some FAQs that might help you:

-

FAQs

-
    -
1. What is the meaning of düşen kelime oyunu?<br />

   Düşen kelime oyunu means falling word game in Turkish. It is a word puzzle game that involves finding hidden words and clearing the letter boxes.<br />

2. How many levels are there in düşen kelime oyunu apk?<br />

   There are over 500 levels in düşen kelime oyunu apk, each with a different theme and category. You can unlock new levels by earning coins or watching ads.<br />

3. How can I get more coins in düşen kelime oyunu apk?<br />

   You can get more coins by finding words, completing levels, watching ads, completing daily tasks and achievements, and finding bonus words. You can use coins to reveal letters or words when you are stuck.<br />

4. How can I learn new words in düşen kelime oyunu apk?<br />

   You can learn new words by tapping on them in the word treasury. You will see their meaning and pronunciation. You can also earn titles by finding special words.<br />

5. How can I play with other players in düşen kelime oyunu apk?<br />

   You can play with other players online by joining the global leaderboard. You will see your rank and score among other players. You can also connect with Facebook and invite your friends to play with you.<br />

-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Blockman GO-Adventures Mod APK Hack Your Way to Adventure on Apkmody.md b/spaces/1phancelerku/anime-remove-background/Blockman GO-Adventures Mod APK Hack Your Way to Adventure on Apkmody.md
deleted file mode 100644
index 6af68f750d466458d1fe06e132d7482889ba86af..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Blockman GO-Adventures Mod APK Hack Your Way to Adventure on Apkmody.md
+++ /dev/null
@@ -1,90 +0,0 @@
-<br />

How to Hack Blockman Go Adventures with APKMODY

-

Blockman Go Adventures is a popular sandbox game that offers a variety of gameplay options for players to enjoy. However, some players may find it hard to unlock all the mini-games, modes, accessories and resources in the game. That's why some players resort to hacking Blockman Go Adventures with APKMODY, a website that provides modded APK files for Android games and apps. In this article, we will show you how to hack Blockman Go Adventures with APKMODY and what are the benefits of doing so.

-

What is Blockman Go Adventures?

-

Blockman Go Adventures is a free app that includes minigames, chatting and making friends. You can play various block style minigames here, such as Bed Wars, Sky Block, Egg War, Murder Mystery, Sky Wars and more. Each minigame has its own rules, objectives and rewards. You can also create your own minigames and share them with other players.

-




-

A sandbox game with various mini-games and modes

-

Blockman Go Adventures is a sandbox game that lets you play, craft and share your fun experiences with your friends. You can explore different worlds, build structures, fight enemies, collect resources and complete quests. You can also join the adventures and venture into the countless minigames from all the different genres. There is always something new and exciting for you to discover every day.

-

A social platform with chat and friends features

-

Blockman Go Adventures is also a social platform that allows you to chat and make friends with other players. You can join or create parties, clans and guilds. You can also send messages, voice chats, gifts and emojis. You can customize your avatar with creative selections of fashionable accessories. With a growing inventory of items, the sky's the only limit.

-

What is APKMODY?

-

APKMODY is a website that provides modded APK files for Android games and apps. At APKMODY, you can easily search and download thousands of MOD APK, Premium APK and Original APK games and apps for free. You can use the search button to find what you're looking for, or browse the pre-designed categories.

-

A website that provides modded APK files for Android games and apps

-

A modded APK file is an altered version of an original APK file that has been modified by someone to add or remove some features or functions. For example, a modded APK file may have unlimited resources, unlocked levels, removed ads or added cheats. A modded APK file may also have a different name or icon than the original one.

-

A source of unlimited resources, features and fun for Blockman Go Adventures

-

APKMODY provides a modded APK file for Blockman Go Adventures that has many advantages over the original one. The modded APK file has a mod menu that lets you enable or disable various hacks in the game. The hacks include fly hack, unlimited Gcubes and money. With these hacks, you can enjoy Blockman Go Adventures without any limitations or restrictions. You can also have more fun and creativity with the modded APK file.<br />
-

Blockman Go Adventures Mod Menu Fly Hack
-Blockman Go Mod Apk Unlimited Gcubes and Money
-Blockman Go Adventures Hack Download Media Fire
-How to Hack Blockman Go Adventures v2.46.1
-Blockman Go Mod Apk v2.45.2 Link in Comment
-Blockman Go Adventures Fly Hack No Root
-Blockman Go Mod Menu Apk Latest Version
-Blockman Go Adventures Unlimited Gcubes Mod
-Blockman Go Hack Apkmody Free Download
-Blockman Go Adventures Mod Apk v2.46.1 YouTube
-Blockman Go Mod Apk v2.45.2 YouTube Video
-Blockman Go Adventures Hack Tutorial 2023
-Blockman Go Mod Menu Apk Media Fire Link
-Blockman Go Adventures Unlimited Money Hack
-Blockman Go Hack Apkmody No Survey No Password
-Blockman Go Adventures Mod Apk v2.46.1 Features
-Blockman Go Mod Apk v2.45.2 Download Now
-Blockman Go Adventures Fly Hack Tutorial
-Blockman Go Mod Menu Apk How to Install
-Blockman Go Adventures Unlimited Gcubes Generator
-Blockman Go Hack Apkmody Online Tool
-Blockman Go Adventures Mod Apk v2.46.1 Review
-Blockman Go Mod Apk v2.45.2 Gameplay Video
-Blockman Go Adventures Fly Hack APK Download
-Blockman Go Mod Menu Apk No Ban No Root
-Blockman Go Adventures Unlimited Money Mod APK
-Blockman Go Hack Apkmody 100% Working 2023
-Blockman Go Adventures Mod Apk v2.46.1 Update
-Blockman Go Mod Apk v2.45.2 New Features
-Blockman Go Adventures Fly Hack iOS Android
-Blockman Go Mod Menu Apk All Skins Unlocked
-Blockman Go Adventures Unlimited Gcubes Cheat
-Blockman Go Hack Apkmody Safe and Secure
-Blockman Go Adventures Mod Apk v2.46.1 Download Link
-Blockman Go Mod Apk v2.45.2 Free Download Link
-Blockman Go Adventures Fly Hack No Verification
-Blockman Go Mod Menu Apk Easy to Use
-Blockman Go Adventures Unlimited Money Cheat Code
-Blockman Go Hack Apkmody Latest Version 2023
-Blockman Go Adventures Mod Apk v2.46.1 Media Fire Link

-

How to hack Blockman Go Adventures with APKMODY?

-

Hacking Blockman Go Adventures with APKMODY is very easy and simple. You just need to follow these steps:

-

Download the modded APK file from APKMODY website

-

First, you need to visit the APKMODY website and search for Blockman Go Adventures. You will see the modded APK file for the game with a download button. Click on the download button and wait for the file to be downloaded to your device. The file size is about 140 MB, so make sure you have enough storage space and a stable internet connection.

-

Install the modded APK file on your Android device

-

Next, you need to install the modded APK file on your Android device. Before you do that, you need to enable the installation of apps from unknown sources in your device settings. This will allow you to install apps that are not from the Google Play Store. To do that, go to Settings > Security > Unknown Sources and toggle it on. Then, locate the modded APK file in your device storage and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.

-

Enjoy the hacked Blockman Go Adventures with mod menu, fly hack, unlimited Gcubes and money

-

Finally, you can enjoy the hacked Blockman Go Adventures with all the features and hacks that you want. To access the mod menu, you need to tap on the floating icon on the screen. The mod menu will show you all the hacks that you can enable or disable in the game. You can use the fly hack to fly around the map, the unlimited Gcubes and money hack to buy anything you want in the game, and other hacks that will make your gameplay more fun and easy.

-

What are the benefits of hacking Blockman Go Adventures with APKMODY?

-

Hacking Blockman Go Adventures with APKMODY has many benefits that will enhance your gaming experience. Here are some of them:

-

You can access all the mini-games and modes without restrictions

-

Some of the mini-games and modes in Blockman Go Adventures require you to have a certain amount of Gcubes or money to play them. For example, you need 100 Gcubes to play Bed Wars, 50 Gcubes to play Sky Wars, and 10 Gcubes to play Murder Mystery. With the unlimited Gcubes and money hack, you can access all these mini-games and modes without any restrictions. You can also join any server or room that you want without worrying about your level or rank.

-

You can customize your avatar with any accessories you want

-

Another benefit of hacking Blockman Go Adventures with APKMODY is that you can customize your avatar with any accessories you want. You can choose from a wide range of hats, glasses, masks, clothes, shoes, wings, tails and more. You can also mix and match different accessories to create your own unique style. With the unlimited Gcubes and money hack, you can buy any accessory you want in the game without spending real money.

-

You can chat and make friends with other players easily

-

The last benefit of hacking Blockman Go Adventures with APKMODY is that you can chat and make friends with other players easily. You can use the chat feature to communicate with other players in the game. You can also send voice chats, gifts and emojis to express yourself better. You can also add other players as friends and join their parties, clans or guilds. With the fly hack, you can also visit other players' worlds and see what they have built.

-

Conclusion

-

Blockman Go Adventures is a fun and exciting sandbox game that offers a lot of gameplay options for players to enjoy. However, some players may want to hack Blockman Go Adventures with APKMODY to get unlimited resources, features and fun in the game. In this article, we have shown you how to hack Blockman Go Adventures with APKMODY and what the benefits of doing so are. We hope that this article has been helpful for you and that you have learned something new today.<br />

-

If you have any questions or feedback about this article, please feel free to leave a comment below. We would love to hear from you and answer your queries as soon as possible.

-

Thank you for reading this article and have a great day!

-

Frequently Asked Questions

-

Here are some of the frequently asked questions about hacking Blockman Go Adventures with APKMODY and their answers:

-

Is hacking Blockman Go Adventures with APKMODY safe?

-

Hacking Blockman Go Adventures with APKMODY is generally safe, as long as you download the modded APK file from the official APKMODY website. The modded APK file is tested and verified by the APKMODY team before being uploaded to the website. However, you should always be careful when installing apps from unknown sources, as they may contain viruses or malware that can harm your device or steal your personal information. You should also backup your data before installing the modded APK file, in case something goes wrong.

-

Is hacking Blockman Go Adventures with APKMODY legal?

-

Hacking Blockman Go Adventures with APKMODY is not legal, as it violates the terms of service and the intellectual property rights of the game developer. By hacking Blockman Go Adventures with APKMODY, you are modifying the original game without the permission of the game developer. This can result in legal actions or penalties from the game developer, such as banning your account, suspending your access or suing you for damages. Therefore, you should hack Blockman Go Adventures with APKMODY at your own risk and responsibility.

-

Will I get banned for hacking Blockman Go Adventures with APKMODY?

-

There is a possibility that you will get banned for hacking Blockman Go Adventures with APKMODY, as the game developer may detect your abnormal activities and flag your account. The game developer may also have anti-cheat systems or mechanisms that can prevent or detect hacking attempts. If you get banned for hacking Blockman Go Adventures with APKMODY, you will lose all your progress, data and items in the game. You may also not be able to play the game again with the same account or device. Therefore, you should hack Blockman Go Adventures with APKMODY cautiously and moderately.

-

Can I update Blockman Go Adventures after hacking it with APKMODY?

-

No, you cannot update Blockman Go Adventures after hacking it with APKMODY, as the modded APK file is not compatible with the official updates from the game developer. If you try to update Blockman Go Adventures after hacking it with APKMODY, you may encounter errors, crashes or glitches in the game. You may also lose all the hacks and features that you have enabled in the modded APK file. Therefore, you should not update Blockman Go Adventures after hacking it with APKMODY.

-

Can I hack other games and apps with APKMODY?

-

Yes, you can hack other games and apps with APKMODY, as the website provides modded APK files for many other popular games and apps on Android. You can find games and apps from various categories and genres on the website, such as action, adventure, arcade, casual, puzzle, simulation, strategy, education, entertainment, lifestyle, music, social and more. You can also request for new games and apps to be modded by the APKMODY team on their website.

-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Temple Run 2 Lantern Festival Mod Apk and Enjoy Unlimited Coins Gems and Characters.md b/spaces/1phancelerku/anime-remove-background/Download Temple Run 2 Lantern Festival Mod Apk and Enjoy Unlimited Coins Gems and Characters.md
deleted file mode 100644
index f1763d26d955a1d7265f9ed6ddd0e524fcaf1336..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Temple Run 2 Lantern Festival Mod Apk and Enjoy Unlimited Coins Gems and Characters.md
+++ /dev/null
@@ -1,100 +0,0 @@
-<br />

Temple Run 2 Lantern Festival Mod APK: How to Download and Install

-

If you are a fan of Temple Run 2, the popular endless runner game that has been downloaded over a billion times, you might be interested in trying out the Lantern Festival mod apk. This mod apk is a modified version of the game that offers unlimited coins and gems, new characters and power-ups, and an ad-free gameplay experience. You can also enjoy the Lantern Festival, a traditional Chinese festival that honours deceased ancestors and promotes reconciliation, peace, and forgiveness. In this article, we will show you what Temple Run 2 is, what the Lantern Festival mod apk is, and how to download and install it on your Android device.

-

temple run 2 lantern festival mod apk


DOWNLOAD ❤❤❤ https://jinyurl.com/2uNU1j



-

What is Temple Run 2?

-

Temple Run 2 is a sequel to the smash hit phenomenon that redefined mobile gaming. In this game, you have to run, jump, turn, and slide your way through perilous cliffs, zip lines, mines, and forests as you try to escape with the cursed idol. How far can you run?

-

Game features and gameplay

-

Temple Run 2 features beautiful new graphics, gorgeous new organic environments, new obstacles, more power-ups, more achievements, and special powers for each character. You can also choose from different characters with unique abilities, such as Guy Dangerous, Scarlett Fox, Barry Bones, Karma Lee, Montana Smith, Francisco Montoya, Zack Wonder, and more. You can also customize your character with different outfits and accessories.

-

The gameplay is simple but addictive. You have to swipe left or right to turn, swipe up to jump, swipe down to slide, and tilt your device to move sideways. You have to avoid crashing into obstacles or falling off the edge while collecting coins and gems along the way. You can also use power-ups such as shields, magnets, boosters, coin multipliers, and head starts to enhance your performance. You can also activate special powers for each character by filling up a meter with green gems.

-


-

Different maps and modes

-

Temple Run 2 offers different maps and modes for you to explore and enjoy. You can run through the Sky Summit, Frozen Shadows, Blazing Sands, Lost Jungle, Spooky Summit, Pirate Cove, Spirit Cove, Holi Festival, Fall Jungle, or Winter Wasteland. Each map has its own theme, scenery, obstacles, and challenges.

-

You can also play different modes such as Daily Challenges, Global Challenges, Artifacts Missions, or Map Events. These modes give you specific tasks or goals to complete and reward you with coins, gems, or other prizes.

-

What is the Lantern Festival Mod APK?

-

The Lantern Festival mod apk is a modified version of Temple Run 2 that gives you some extra features and benefits that are not available in the original game. These include:

-

Mod features and benefits

- Unlimited coins and gems
- New characters and power-ups
- An ad-free gameplay experience

How to download and install the mod apk

-

To download and install the mod apk on your Android device, you need to follow these steps:

1. Enable installation from unknown sources in your device settings (under Settings > Security on most Android devices).
2. Download the Lantern Festival mod APK file from a trusted source, and verify the file before installing it — see the checksum sketch below.
3. Locate the downloaded file in your file manager and tap it to start the installation.
4. Once the installation finishes, open the game and enjoy the mod.
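Because mod APKs come from third-party sites, it is worth checking the file against a checksum whenever the download site publishes one. Below is a minimal Python sketch of that check, assuming you downloaded the file to a computer first; the file name and expected hash are placeholders, not real values for any specific mod.

```python
# Hypothetical check: compare a downloaded APK against a published SHA-256
# checksum before installing it. The file name and hash are placeholders.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in chunks so large APKs don't need to fit in memory.
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "<checksum published by the download site>"  # placeholder
actual = sha256_of("temple-run-2-lantern-festival-mod.apk")  # placeholder name
print("OK to install" if actual == expected else "Hash mismatch: do not install")
```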

What is the Lantern Festival?

-

The Lantern Festival is a traditional Chinese festival that originated in the Qin dynasty (221 - 207 BC). It falls on the 15th day of the first month of the lunar calendar, under the first full moon of the new lunar year, which is usually in February or early March on the Gregorian calendar. It marks the end of the Chinese New Year celebrations and is also a time to honour deceased ancestors and promote reconciliation, peace, and forgiveness.

-

History and significance of the festival

-

There are many legends and stories about the origin and significance of the Lantern Festival. One of them is that it was a way to worship Taiyi, the ancient god of heaven, who controlled the destiny of human beings. The emperor would ask Taiyi to bring favourable weather and good health to his people.

-

Another legend is that it was a way to celebrate the birthday of Tianguan, the Taoist god of good fortune. People would light lanterns and pray for his blessings.

-

A third legend is that it was a way to commemorate the Buddha, who enlightened people with his teachings. Buddhist monks would light lanterns in the temples to show respect to the Buddha. Later, this custom spread to the general public.

-

Regardless of the origin, the Lantern Festival has become a symbol of hope, joy, and harmony. People light lanterns to express their wishes and gratitude, and to enjoy the beauty of the full moon. The lanterns are also seen as a way to guide the spirits of the ancestors back to their families.

-

How to celebrate the festival in the game

-

In Temple Run 2, you can celebrate the Lantern Festival by playing on the special map called Lantern Festival. This map features a stunning night scene with colourful lanterns, fireworks, and dragon dances. You can also collect red envelopes, which are traditional gifts containing money or blessings, along the way.

-

To play on this map, you need to download and install the Lantern Festival mod apk, which gives you access to this map and other features. You can also choose from different characters that are related to Chinese culture, such as Sun Wukong, Mulan, or Emperor Qin Shi Huang. You can also use different power-ups that are inspired by Chinese elements, such as jade coins, dragon scrolls, or firecrackers.

-

The Lantern Festival map is a great way to experience the charm and fun of this ancient festival while enjoying the thrill and challenge of Temple Run 2.

Conclusion

-

Temple Run 2 is an amazing game that offers endless fun and excitement. You can run through different maps and modes, collect coins and gems, unlock new characters and power-ups, and challenge yourself with various tasks and goals. The Lantern Festival mod apk adds unlimited coins and gems, new characters and power-ups, and an ad-free gameplay experience, and its special map lets you celebrate the Lantern Festival, a beautiful and meaningful Chinese festival that honours the ancestors and promotes peace and harmony. If you want to download and install the mod apk, follow the steps we have provided in this article. We hope you have a great time playing Temple Run 2 and experiencing the Lantern Festival.

-

FAQs

-

Here are some frequently asked questions about Temple Run 2 and the Lantern Festival mod apk:

- - - - - - - - - - - - - - - - - - - - - - - - - -
| Question | Answer |
| --- | --- |
| Is Temple Run 2 free to play? | Yes, Temple Run 2 is free to play. However, it contains in-app purchases that allow you to buy coins, gems, or other items with real money. You can also watch ads to earn some rewards. |
| Is the Lantern Festival mod apk safe to use? | Yes, the Lantern Festival mod apk is safe to use. However, you need to make sure that you download it from a trusted source and scan it with an antivirus program before installing it. You also need to enable the unknown sources option in your device settings to allow the installation of the mod apk. |
| Will I lose my progress if I use the mod apk? | No, you will not lose your progress. The mod apk does not overwrite your original game data. However, you may not be able to sync your progress with your Google Play account or other social media accounts. |
| Can I play online with other players using the mod apk? | No. The mod apk is for offline gameplay only. You may face issues or errors if you try to connect to the internet or join a multiplayer mode using it. |
| Can I update the mod apk when a new version of Temple Run 2 is released? | No. The mod apk is based on a specific version of the game and may not be compatible with newer versions. You may need to wait for a new version of the mod apk to be released, or uninstall the mod apk and install the original game from the Google Play Store. |

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download the Latest WhatsApp Business App for Free A Guide for Small Businesses.md b/spaces/1phancelerku/anime-remove-background/Download the Latest WhatsApp Business App for Free A Guide for Small Businesses.md deleted file mode 100644 index 516e945c75f13c6c5e889fc2e163d113c0501fa8..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download the Latest WhatsApp Business App for Free A Guide for Small Businesses.md +++ /dev/null @@ -1,78 +0,0 @@ -
-

Download the latest WhatsApp Business and transform your business


WhatsApp is the most popular messaging app in the world, with over 2 billion users. But did you know that there is a version of WhatsApp designed specifically for businesses? It's called WhatsApp Business and it can help you engage with your customers, drive sales, and grow your business.

-

download the latest whatsapp business


Download Ziphttps://jinyurl.com/2uNMHq




What is WhatsApp Business and how is it different from WhatsApp?


WhatsApp Business is a free-to-download app that allows you to create a business presence on WhatsApp, communicate more efficiently with your customers, and manage your workflow. It is built on top of WhatsApp Messenger and includes all the features that you rely on, such as multimedia, free calls, group chat, and end-to-end encryption.


The main difference between WhatsApp and WhatsApp Business is that WhatsApp Business has a verified and more complete business profile that helps your customers trust who they are chatting with. You can also use WhatsApp Business with a landline or fixed phone number, and run both WhatsApp Business and WhatsApp Messenger on the same phone as long as they are linked to different numbers.


WhatsApp Business features and benefits


WhatsApp Business offers many features and benefits that can help you transform your business. Here are some of them:

-


How to create a business profile


A business profile is like your digital storefront on WhatsApp. It allows you to showcase your business name, logo, website, address, category, description, and catalog. To create a business profile, download the WhatsApp Business app from the Google Play Store or the App Store and follow the instructions to verify your business phone number. Then, tap More options > Settings > your business name and fill in the details.


How to use messaging tools


Messaging tools are designed to help you respond to your customers faster and more effectively. You can use labels to organize your chats and contacts, greeting messages to introduce your business to new customers, quick replies to save and reuse frequently sent messages, and away messages to let your customers know when you are not available.


How to showcase your products and services


A catalog is a feature that allows you to display your products and services on WhatsApp. You can add images, prices, descriptions, links, and codes to your catalog items. Customers can browse your catalog and place orders directly from the app. To create a catalog, tap More options > Settings > Business tools > Catalog.


How to download the latest WhatsApp Business app


If you want to download the latest WhatsApp Business app, follow these steps:


For Android devices

1. Open the Google Play Store on your Android device.
2. Search for "WhatsApp Business" and select the official app.
3. Tap Install and wait for the download to finish.
4. Open the app and follow the instructions to verify your business phone number.

For iPhone devices

1. Open the App Store on your iPhone.
2. Search for "WhatsApp Business".
3. Tap Get and wait for the download to finish.
4. Open the app and follow the instructions to verify your business phone number.

Conclusion

-


If you want to take your business communication to the next level, download the latest WhatsApp Business app today. You will be able to create a professional and personalized business profile, use smart messaging tools, and showcase your products and services to millions of potential customers. WhatsApp Business is the ultimate app for small and medium businesses that want to connect with their customers in a simple, secure, and reliable way.

-

FAQs

-

Here are some frequently asked questions about WhatsApp Business:

-
1. What is the difference between WhatsApp Business and WhatsApp Business API?

   WhatsApp Business is an app that you can download on your phone and use to manage your business communication. WhatsApp Business API is a solution that allows you to integrate WhatsApp with your existing systems and platforms, such as CRM, e-commerce, or chatbots. WhatsApp Business API is suitable for larger businesses that need more advanced features and scalability (a code sketch of an API call follows this list).

2. Can I use WhatsApp Business on my computer?

   Yes, you can use WhatsApp Business on your computer by using WhatsApp Web or WhatsApp Desktop. You will need to scan a QR code with your phone to link your devices. You can also download the WhatsApp Business app on your computer if you have Windows 8.1 or higher or Mac OS X 10.10 or higher.

3. How much does WhatsApp Business cost?

   WhatsApp Business is free to download and use. However, you may incur data charges from your mobile provider for using the app. You may also be charged a fee for sending messages to customers who are not in your contact list or who have not initiated a conversation with you in the past 24 hours. This fee varies depending on the country and carrier of the recipient.

4. How can I verify my business on WhatsApp?

   Verification is a process that confirms that your business phone number matches the phone number on your business profile. Verification is optional and not required to use WhatsApp Business. To request verification, tap More options > Settings > Business tools > Verified business name and follow the instructions.

5. How can I get customer feedback on WhatsApp?

   You can get customer feedback on WhatsApp by using surveys, polls, ratings, or reviews. You can create these using third-party tools or platforms that integrate with WhatsApp. For example, you can use SurveyMonkey, Typeform, Google Forms, or JotForm to create surveys and polls and send them to your customers via WhatsApp.
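To make the API answer above more concrete, here is a hedged sketch of what sending a message through Meta's WhatsApp Business Platform (Cloud API) can look like in Python. The access token, phone number ID, and recipient number are placeholders, and the API version in the URL changes over time, so treat this as an illustration rather than production code.

```python
# Illustrative sketch of a WhatsApp Business Cloud API call.
# ACCESS_TOKEN, PHONE_NUMBER_ID, and the recipient number are placeholders.
import requests

ACCESS_TOKEN = "YOUR_ACCESS_TOKEN"
PHONE_NUMBER_ID = "YOUR_PHONE_NUMBER_ID"

response = requests.post(
    f"https://graph.facebook.com/v17.0/{PHONE_NUMBER_ID}/messages",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={
        "messaging_product": "whatsapp",
        "to": "15551234567",  # placeholder recipient in international format
        "type": "text",
        "text": {"body": "Hello! Thanks for contacting our business."},
    },
    timeout=10,
)
print(response.status_code, response.json())
```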

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Escape from Grannys House in Granny 3 MOD APK with No Ads and God Mode.md b/spaces/1phancelerku/anime-remove-background/Escape from Grannys House in Granny 3 MOD APK with No Ads and God Mode.md deleted file mode 100644 index dbcc26a9ae4122e56b1c0f5fab8c5815a1f0c765..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Escape from Grannys House in Granny 3 MOD APK with No Ads and God Mode.md +++ /dev/null @@ -1,162 +0,0 @@ - -

Granny Chapter 3 Mod APK: How to Download and Play the Latest Version of the Horror Game

-

If you are a fan of horror games, you might have heard of Granny, a popular indie survival horror game developed by DVloper. The game has spawned two sequels, Granny: Chapter Two and Granny 3, which add more features, characters, and locations to the original. In this article, we focus on Granny Chapter 3 Mod APK, a modified version of the third installment that offers some advantages over the official release. We will explain what Granny Chapter 3 is, what the mod APK is, how to download and install it, and how to play it, along with some tips and tricks and a selection of reviews and ratings from other players.

-

What is Granny Chapter 3?

-

Granny Chapter 3 is the latest game in the Granny series, released on August 10, 2021 for Android and iOS devices. It is a horror game that challenges you to escape from a house where you are trapped by a psychotic old woman named Granny and her husband Grandpa, who are both undead and have supernatural abilities. You also have to deal with a giant spider that lives in the attic and a crow that guards a key item. You have five days to find a way out of the house, using various items and tools that you can find or craft. You have to be careful and quiet, as Granny and Grandpa can hear everything and will chase you if they spot you. You can hide in wardrobes, under beds, or in other places, but they won't give up easily. You can also fight back by using weapons such as a shotgun, a crossbow, or a pepper spray, but they are limited and hard to find. If you get caught by Granny or Grandpa, you will lose a day and wake up in a different room. If you run out of days, you will get a game over scene where you are killed in a gruesome way.

-

granny chapter 3 mod apk


DOWNLOAD --->>> https://jinyurl.com/2uNQ6D



-

The plot of Granny Chapter 3

-

The plot of Granny Chapter 3 is not very clear, as there are no cutscenes or dialogues in the game. However, based on some clues and hints, we can infer that the game takes place after the events of Granny: Chapter Two, where you escaped from a boat where you were held captive by Granny and Grandpa. You somehow ended up in their house, which is located in a forest. You don't remember how you got there or why they are after you. You only know that you have to get out of there as soon as possible before they kill you.

-

The features of Granny Chapter 3

-

Granny Chapter 3 has many features that make it an exciting and terrifying horror game. Some of them are:

- Realistic graphics and creepy sound effects that build tension
- Four threats to avoid: Granny, Grandpa, a giant spider in the attic, and a crow guarding a key item
- Weapons to fight back with, such as a shotgun, a crossbow, and a pepper spray
- Hiding spots such as wardrobes and the space under beds
- A five-day limit and a house full of items, tools, and secrets to discover

What is Granny Chapter 3 Mod APK?

-

Granny Chapter 3 Mod APK is a modified version of Granny Chapter 3 that offers some advantages over the official version. A mod APK is an Android application package file that has been altered or hacked by a third-party developer to add or remove some features from the original app. Some of the benefits of using Granny Chapter 3 Mod APK are:

- Unlimited ammo, health, and money
- Extra levels and modes unlocked
- Freedom to choose the difficulty level, game mode, character skin, and weapon before starting

The benefits of using Granny Chapter 3 Mod APK

-

The benefits of using Granny Chapter 3 Mod APK are obvious: you can enjoy the game without any limitations or restrictions. You can have more fun and excitement by using all the features and options that the game has to offer. You can also save your time and effort by skipping the hard and tedious parts of the game. You can also impress your friends by showing them your achievements and skills in the game.

-

The risks of using Granny Chapter 3 Mod APK

-

However, using Granny Chapter 3 Mod APK also comes with some risks that you should be aware of before downloading and installing it. Some of the risks are:

- A ban if the developer detects that you are using a modded version
- Viruses or malware bundled with files from untrusted sources
- Errors, glitches, or crashes, since the mod is not an official release
- Incompatibility with future updates or patches

How to download and install Granny Chapter 3 Mod APK?

-

If you still want to try Granny Chapter 3 Mod APK, despite knowing its risks, here are some steps that you need to follow to download and install it on your device:

-


-

Step 1: Enable unknown sources on your device

-

Before you can install any modded version of an app on your device, you need to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on. You might get a warning message that says installing apps from unknown sources might harm your device, but you can ignore it if you trust the source of the app.

-

Step 2: Download the Granny Chapter 3 Mod APK file from a trusted source

-

The next step is to download the Granny Chapter 3 Mod APK file from a trusted source. There are many websites that offer modded versions of apps, but not all of them are safe or reliable. You need to do some research and check the reviews and ratings of the website before downloading anything from it. You can also use a VPN or antivirus app to protect your device from any potential threats. To download the Granny Chapter 3 Mod APK file, go to the website of your choice, find the download link, and click on it. You might have to complete some surveys or watch some ads before you can access the download link, but be careful not to click on any suspicious or malicious links. The download process might take some time depending on your internet speed and the size of the file.

-

Step 3: Install the Granny Chapter 3 Mod APK file on your device

-

The final step is to install the Granny Chapter 3 Mod APK file on your device. To do this, go to your device file manager, find the downloaded file, and tap on it. You might get a pop-up message that says installing this app might harm your device, but you can ignore it if you trust the source of the app. The installation process might take some time depending on your device performance and the size of the file. Once the installation is done, you can open the app and enjoy playing Granny Chapter 3 Mod APK.
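As an alternative to tapping the file on the device, you can sideload the APK from a computer using Android's adb tool. The sketch below assumes the Android platform tools (adb) are installed and on your PATH, USB debugging is enabled on the phone, and the file name is a placeholder.

```python
# Hypothetical sideload via adb from a computer. Assumes the Android
# platform tools are installed and USB debugging is enabled on the device.
import subprocess

apk_path = "granny-chapter-3-mod.apk"  # placeholder file name

result = subprocess.run(
    ["adb", "install", "-r", apk_path],  # -r reinstalls, keeping app data
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```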

-

How to play Granny Chapter 3 Mod APK?

-

Playing Granny Chapter 3 Mod APK is similar to playing the official version of Granny Chapter 3, except that you have more options and features to use. You can choose the difficulty level, the game mode, the character skin, the weapon, and other settings before starting the game. You can also use the unlimited ammo, health, money, and other benefits that come with the modded version of the game. The goal of the game is still to escape from the house within five days by finding and using various items and tools. You can also explore the house and discover its secrets and mysteries. You can also play with your friends online in the multiplayer mode and cooperate or compete with them.

-

Tips and tricks for playing Granny Chapter 3 Mod APK

-

Here are some tips and tricks for playing Granny Chapter 3 Mod APK that might help you survive and escape from the house:

- Use headphones so you can hear where Granny and Grandpa are moving
- Stay in crouch mode and peek around corners to avoid being spotted
- Use distraction items to lure enemies away from your path
- Memorize the hiding places (wardrobes, under beds) near the rooms you explore
- Save your limited weapons and items for emergencies, and plan your escape route before grabbing key items

Reviews and ratings for Granny Chapter 3 Mod APK

-

Granny Chapter 3 Mod APK has received mixed reviews and ratings from other players who have tried it. Some of them are positive and praise the game for its graphics, gameplay, sound effects, features, options, and fun. Some of them are negative and criticize the game for its bugs, glitches, errors, crashes, ads, and difficulty. Here are some examples of reviews and ratings for Granny Chapter 3 Mod APK from different sources:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Source | Review | Rating |
| --- | --- | --- |
| Google Play Store | "This game is awesome. I love the graphics and the sound effects. The game is very challenging and scary. I like the multiplayer mode where I can play with my friends. The mod APK is very useful and easy to install. I recommend this game to everyone who likes horror games." | 5 stars |
| Google Play Store | "This game is terrible. It has so many bugs and glitches. The game keeps crashing and freezing. The ads are annoying and intrusive. The game is too hard and frustrating. The mod APK is fake and dangerous. It gave me viruses and malware on my device. I hate this game and I want a refund." | 1 star |
| YouTube | "I watched a video of this game and it looks amazing. The graphics are realistic and the sound effects are creepy. The game is very exciting and thrilling. I like the new features and options that the game has. The mod APK is awesome and helpful. It gives me unlimited ammo, health, money, and more. I can't wait to play this game." | Liked |
| YouTube | "I played this game and it sucks. The graphics are poor and the sound effects are annoying. The game is very boring and repetitive. I don't like the new features and options that the game has. The mod APK is useless and harmful. It removes the original features, functions, and challenges of the game. It also makes my device slow and laggy." | Disliked |
| Reddit | "I downloaded this game and it's pretty good. The graphics are decent and the sound effects are scary. The game is very challenging and fun. I like the multiplayer mode where I can play with other people online. The mod APK is nice and convenient. It gives me more options and features to use in the game." | Upvoted |
| Reddit | "I installed this game and it's awful. It has so many errors and crashes. The game doesn't work properly on my device. The ads are irritating and unnecessary. The game is too easy and dull. I don't like the multiplayer mode where I have to deal with trolls and cheaters. The mod APK is bad and risky. It makes my device vulnerable to hackers and attackers." | Downvoted |
-

Conclusion

-

In conclusion, Granny Chapter 3 Mod APK is a modified version of Granny Chapter 3, a horror game that challenges you to escape from a house where you are trapped by Granny, Grandpa, a spider, and a crow. It offers some advantages over the official version, such as unlimited ammo, health, money, levels, modes, etc., but it also comes with some risks, such as bans, viruses, malware, errors, glitches, etc.

-

If you want to try Granny Chapter 3 Mod APK, enable unknown sources on your device, download the file from a trusted source, install it, and enjoy playing it.

-

If you want to play Granny Chapter 3 Mod APK safely and effectively, you need to follow some tips and tricks, such as using headphones, crouch mode, peek mode, distraction items, hiding places, weapons, items, etc.

-

If you want to know more about Granny Chapter 3 Mod APK, you can read some reviews and ratings from other players who have tried it.

-

We hope this article has helped you understand what Granny Chapter 3 Mod APK is, how to download and install it, how to play it, and what are some tips and tricks for playing it. We also hope you have enjoyed reading this article and found it useful and engaging. Thank you for your attention and interest.

-

FAQs

-

Here are some frequently asked questions about Granny Chapter 3 Mod APK that you might want to know:

-

Q: Is Granny Chapter 3 Mod APK free?

-

A: Yes, Granny Chapter 3 Mod APK is free to download and play. However, you might have to pay for some in-app purchases or watch some ads to access some features or items in the game.

-

Q: Is Granny Chapter 3 Mod APK safe?

-

A: Granny Chapter 3 Mod APK is not completely safe, as it might contain some viruses or malware that can harm your device or compromise your personal information or data. It might also cause some errors or glitches in the game or make it incompatible with future updates or patches. It might also get you banned from the game or lose your account if the developers detect that you are using a modded version of the game.

-

Q: Is Granny Chapter 3 Mod APK legal?

-

A: Granny Chapter 3 Mod APK is not legal, as it violates the terms and conditions of the original app and infringes the intellectual property rights of the developers. It might also violate some laws or regulations in your country or region regarding online gaming or hacking.

-

Q: How can I update Granny Chapter 3 Mod APK?

-

A: You can update Granny Chapter 3 Mod APK by downloading and installing the latest version of the modded app from the same source that you got it from. However, you might lose some features or functions of the previous version or encounter some compatibility issues with the new version.

-

Q: How can I uninstall Granny Chapter 3 Mod APK?

-

A: You can uninstall Granny Chapter 3 Mod APK by going to your device settings, then apps, then Granny Chapter 3 Mod APK, and tapping on uninstall. You might also want to delete the downloaded file from your device file manager and clear your device cache and data to remove any traces of the modded app.

-
-
\ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_objects.py b/spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_objects.py deleted file mode 100644 index 4763e0eef2eb1140e0e01d387e1e6aca6bcaddc5..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_objects.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - -from . import DummyObject, requires_backends - - -class AltDiffusionImg2ImgPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class AltDiffusionPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class CycleDiffusionPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class LDMTextToImagePipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class PaintByExamplePipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class StableDiffusionDepth2ImgPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, 
**kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class StableDiffusionImageVariationPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class StableDiffusionImg2ImgPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class StableDiffusionInpaintPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class StableDiffusionInpaintPipelineLegacy(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class StableDiffusionPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class StableDiffusionMegaPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class StableDiffusionPipelineAllInOne(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class StableDiffusionPipelineSafe(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class 
StableDiffusionUpscalePipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class UnCLIPPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class VersatileDiffusionDualGuidedPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class VersatileDiffusionImageVariationPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class VersatileDiffusionPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class VersatileDiffusionTextToImagePipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - -class VQDiffusionPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp"]) diff --git a/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_123821KB.py b/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_123821KB.py deleted file mode 100644 index becbfae85683a13bbb19d3ea6c840da24e61e01e..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_123821KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6.md b/spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6.md deleted file mode 100644 index cad28f4f52c998a28b8ccb88903ecdab03985e6a..0000000000000000000000000000000000000000 --- a/spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6.md +++ /dev/null @@ -1,46 
+0,0 @@ -# Engineering Wiki - - - -## Codebase - ---- - -[Code Reviews](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Code%20Reviews%202b60c26d2a2e4a348f8f14c77023c385.md) - -[ABstract(插件化AB Testing平台)](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/ABstract%EF%BC%88%E6%8F%92%E4%BB%B6%E5%8C%96AB%20Testing%E5%B9%B3%E5%8F%B0%EF%BC%89%20746b87acd94643ca871ec661b63f196c.md) - -[VUE](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/VUE%209501304a2b03470cad0eea93992d65ae.md) - -[Backend](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Backend%20137c41fa386f43249b249e956eb06bb0.md) - -[AWS](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/AWS%20b022fe0cb7084cc0b64624f7bc8cde2c.md) - -[Redis](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Redis%209e063b60eca24a1783c225cfdc21dd8c.md) - -[CircleCI](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/CircleCI%20719905fcb593423cad302d3fdc1c5dff.md) - -[Smart Domain](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Smart%20Domain%203b0daf8bb0d740439426cfab214f1fa6.md) - -## Guides & Processes - ---- - -[Getting Started](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Getting%20Started%206bc871dcdd4a4554b5b22c0c40740841.md) - -[Engineering Guidelines](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Engineering%20Guidelines%204208cbd4733d4f6f94982f3fb24f6379.md) - -[Development Lifecycle ](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Development%20Lifecycle%20e20a5470e52f49e9bbc4f255cf81db4b.md) - -[How to Deploy](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/How%20to%20Deploy%20b7c4f3fd308944af8ba4637ec40fa4f9.md) - -[Useful Commands](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Useful%20Commands%208a05b1de77ec44b6a55e388c2cc7fe47.md) - -[Engineering Interviews](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Engineering%20Interviews%204be8039581d04456b0151f2cc4b22130.md) - -[How to QA ](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/How%20to%20QA%202f036148193a4fccac2c9e8ae9e6d197.md) - -[Engineering Wiki](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Engineering%20Wiki%208da06b3dcf1b4eaaa3e90aa70feefe56.md) \ No newline at end of file diff --git a/spaces/AIConsultant/MusicGen/docs/DATASETS.md b/spaces/AIConsultant/MusicGen/docs/DATASETS.md deleted file mode 100644 index b0890c03cf732450eb498559638c6b45d50e40c3..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/docs/DATASETS.md +++ /dev/null @@ -1,82 +0,0 @@ -# AudioCraft datasets - -Our dataset manifest files consist in 1-json-per-line files, potentially gzipped, -as `data.jsons` or `data.jsons.gz` files. This JSON contains the path to the audio -file and associated metadata. The manifest files are then provided in the configuration, -as `datasource` sub-configuration. A datasource contains the pointers to the paths of -the manifest files for each AudioCraft stage (or split) along with additional information -(eg. maximum sample rate to use against this dataset). All the datasources are under the -`dset` group config, with a dedicated configuration file for each dataset. - -## Getting started - -### Example - -See the provided example in the directory that provides a manifest to use the example dataset -provided under the [dataset folder](../dataset/example). - -The manifest files are stored in the [egs folder](../egs/example). 
- -```shell -egs/ - example/data.json.gz -``` - -A datasource is defined in the configuration folder, in the dset group config for this dataset -at [config/dset/audio/example](../config/dset/audio/example.yaml): - -```shell -# @package __global__ - -datasource: - max_sample_rate: 44100 - max_channels: 2 - - train: egs/example - valid: egs/example - evaluate: egs/example - generate: egs/example -``` - -For proper dataset, one should create manifest for each of the splits and specify the correct path -to the given manifest in the datasource for each split. - -Then, using a dataset through the configuration can be done pointing to the -corresponding dataset configuration: -```shell -dset= # should match the yaml file name - -# for example -dset=audio/example -``` - -### Creating manifest files - -Assuming you want to create manifest files to load with AudioCraft's AudioDataset, you can use -the following command to create new manifest files from a given folder containing audio files: - -```shell -python -m audiocraft.data.audio_dataset egs/my_dataset/my_dataset_split/data.jsonl.gz - -# For example to generate the manifest for dset=audio/example -# note: we don't use any split and we don't compress the jsonl file for this dummy example -python -m audiocraft.data.audio_dataset dataset/example egs/example/data.jsonl - -# More info with: python -m audiocraft.data.audio_dataset --help -``` - -## Additional information - -### MusicDataset and metadata - -The MusicDataset is an AudioDataset with additional metadata. The MusicDataset expects -the additional metadata to be stored in a JSON file that has the same path as the corresponding -audio file, but with a `.json` extension. - -### SoundDataset and metadata - -The SoundDataset is an AudioDataset with descriptions metadata. Similarly to the MusicDataset, -the SoundDataset expects the additional metadata to be stored in a JSON file that has the same -path as the corresponding audio file, but with a `.json` extension. Additionally, the SoundDataset -supports an additional parameter pointing to an extra folder `external_metadata_source` containing -all the JSON metadata files given they have the same filename as the audio file. diff --git a/spaces/AICopilot/Dropbox/app.py b/spaces/AICopilot/Dropbox/app.py deleted file mode 100644 index d225932e5161ea8f36fcecf33a2354652fc2c1a1..0000000000000000000000000000000000000000 --- a/spaces/AICopilot/Dropbox/app.py +++ /dev/null @@ -1,28 +0,0 @@ -import streamlit as st - -# query params exist -try: - options = ['cat', 'dog', 'mouse', 'bat', 'duck'] - - query_params = st.experimental_get_query_params() - query_option = query_params['option'][0] #throws an exception when visiting http://host:port - - option_selected = st.sidebar.selectbox('Pick option', - options, - index=options.index(query_option)) - if option_selected: - st.experimental_set_query_params(option=option_selected) - -# run when query params don't exist. 
e.g on first launch -except: # catch exception and set query param to predefined value - options = ['cat', 'dog', 'mouse', 'bat', 'duck'] - st.experimental_set_query_params(option=options[1]) # defaults to dog - - query_params = st.experimental_get_query_params() - query_option = query_params['option'][0] - - option_selected = st.sidebar.selectbox('Pick option', - options, - index=options.index(query_option)) - if option_selected: - st.experimental_set_query_params(option=option_selected) \ No newline at end of file diff --git a/spaces/AIFILMS/StyleGANEX/webUI/styleganex_model.py b/spaces/AIFILMS/StyleGANEX/webUI/styleganex_model.py deleted file mode 100644 index 18c679bffc56b0783da2c909a92f4568ec91adaf..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/StyleGANEX/webUI/styleganex_model.py +++ /dev/null @@ -1,492 +0,0 @@ -from __future__ import annotations -import numpy as np -import gradio as gr - -import os -import pathlib -import gc -import torch -import dlib -import cv2 -import PIL -from tqdm import tqdm -import numpy as np -import torch.nn.functional as F -import torchvision -from torchvision import transforms, utils -from argparse import Namespace -from datasets import augmentations -from huggingface_hub import hf_hub_download -from scripts.align_all_parallel import align_face -from latent_optimization import latent_optimization -from utils.inference_utils import save_image, load_image, visualize, get_video_crop_parameter, tensor2cv2, tensor2label, labelcolormap -from models.psp import pSp -from models.bisenet.model import BiSeNet -from models.stylegan2.model import Generator - -class Model(): - def __init__(self, device): - super().__init__() - - self.device = device - self.task_name = None - self.editing_w = None - self.pspex = None - self.landmarkpredictor = dlib.shape_predictor(hf_hub_download('PKUWilliamYang/VToonify', 'models/shape_predictor_68_face_landmarks.dat')) - self.transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), - ]) - self.to_tensor = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), - ]) - self.maskpredictor = BiSeNet(n_classes=19) - self.maskpredictor.load_state_dict(torch.load(hf_hub_download('PKUWilliamYang/VToonify', 'models/faceparsing.pth'), map_location='cpu')) - self.maskpredictor.to(self.device).eval() - self.parameters = {} - self.parameters['inversion'] = {'path':'pretrained_models/styleganex_inversion.pt', 'image_path':'./data/ILip77SbmOE.png'} - self.parameters['sr-32'] = {'path':'pretrained_models/styleganex_sr32.pt', 'image_path':'./data/pexels-daniel-xavier-1239291.jpg'} - self.parameters['sr'] = {'path':'pretrained_models/styleganex_sr.pt', 'image_path':'./data/pexels-daniel-xavier-1239291.jpg'} - self.parameters['sketch2face'] = {'path':'pretrained_models/styleganex_sketch2face.pt', 'image_path':'./data/234_sketch.jpg'} - self.parameters['mask2face'] = {'path':'pretrained_models/styleganex_mask2face.pt', 'image_path':'./data/540.jpg'} - self.parameters['edit_age'] = {'path':'pretrained_models/styleganex_edit_age.pt', 'image_path':'./data/390.mp4'} - self.parameters['edit_hair'] = {'path':'pretrained_models/styleganex_edit_hair.pt', 'image_path':'./data/390.mp4'} - self.parameters['toonify_pixar'] = {'path':'pretrained_models/styleganex_toonify_pixar.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'} - self.parameters['toonify_cartoon'] = 
{'path':'pretrained_models/styleganex_toonify_cartoon.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'} - self.parameters['toonify_arcane'] = {'path':'pretrained_models/styleganex_toonify_arcane.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'} - self.print_log = True - self.editing_dicts = torch.load(hf_hub_download('PKUWilliamYang/StyleGANEX', 'direction_dics.pt')) - self.generator = Generator(1024, 512, 8) - self.model_type = None - self.error_info = 'Error: no face detected! \ - StyleGANEX uses dlib.get_frontal_face_detector but sometimes it fails to detect a face. \ - You can try several times or use other images until a face is detected, \ - then switch back to the original image.' - - def load_model(self, task_name: str) -> None: - if task_name == self.task_name: - return - if self.pspex is not None: - del self.pspex - torch.cuda.empty_cache() - gc.collect() - path = self.parameters[task_name]['path'] - local_path = hf_hub_download('PKUWilliamYang/StyleGANEX', path) - ckpt = torch.load(local_path, map_location='cpu') - opts = ckpt['opts'] - opts['checkpoint_path'] = local_path - opts['device'] = self.device - opts = Namespace(**opts) - self.pspex = pSp(opts, ckpt).to(self.device).eval() - self.pspex.latent_avg = self.pspex.latent_avg.to(self.device) - if 'editing_w' in ckpt.keys(): - self.editing_w = ckpt['editing_w'].clone().to(self.device) - self.task_name = task_name - torch.cuda.empty_cache() - gc.collect() - - def load_G_model(self, model_type: str) -> None: - if model_type == self.model_type: - return - torch.cuda.empty_cache() - gc.collect() - local_path = hf_hub_download('rinong/stylegan-nada-models', model_type+'.pt') - self.generator.load_state_dict(torch.load(local_path, map_location='cpu')['g_ema'], strict=False) - self.generator.to(self.device).eval() - self.model_type = model_type - torch.cuda.empty_cache() - gc.collect() - - def tensor2np(self, img): - tmp = ((img.cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8) - return tmp - - def process_sr(self, input_image: str, resize_scale: int, model: str) -> list[np.ndarray]: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the photo.' - - if input_image is None: - #return [false_image, false_image], 'Error: fail to load empty file.' - raise gr.Error("Error: fail to load empty file.") - frame = cv2.imread(input_image) - if frame is None: - #return [false_image, false_image], 'Error: fail to load the image.' - raise gr.Error("Error: fail to load the image.") - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if model is None or model == 'SR for 32x': - task_name = 'sr-32' - resize_scale = 32 - else: - task_name = 'sr' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - #return [false_image, false_image], info - raise gr.Error(self.error_info) - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = PIL.Image.fromarray(np.uint8(frame)) - x1 = augmentations.BilinearResize(factors=[resize_scale//4])(x1) - x1_up = x1.resize((W, H)) - x2_up = align_face(np.array(x1_up), self.landmarkpredictor) - if x2_up is None: - #return [false_image, false_image], 'Error: no face detected! Please retry or change the photo.' 
- raise gr.Error(self.error_info) - x1_up = transforms.ToTensor()(x1_up).unsqueeze(dim=0).to(self.device) * 2 - 1 - x2_up = self.transform(x2_up).unsqueeze(dim=0).to(self.device) - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - y_hat = torch.clamp(self.pspex(x1=x1_up, x2=x2_up, use_skip=self.pspex.opts.use_skip, resize=False), -1, 1) - - return [self.tensor2np(x1_up[0]), self.tensor2np(y_hat[0])] - - - def process_s2f(self, input_image: str, seed: int) -> np.ndarray: - task_name = 'sketch2face' - with torch.no_grad(): - x1 = transforms.ToTensor()(PIL.Image.open(input_image)).unsqueeze(0).to(self.device) - if x1.shape[2] > 513: - x1 = x1[:,:,(x1.shape[2]//2-256)//8*8:(x1.shape[2]//2+256)//8*8] - if x1.shape[3] > 513: - x1 = x1[:,:,:,(x1.shape[3]//2-256)//8*8:(x1.shape[3]//2+256)//8*8] - x1 = x1[:,0:1] # uploaded files will be transformed to 3-channel RGB image! - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - self.pspex.train() - torch.manual_seed(seed) - y_hat = self.pspex(x1=x1, resize=False, latent_mask=[8,9,10,11,12,13,14,15,16,17], use_skip=self.pspex.opts.use_skip, - inject_latent= self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,18,1) * 0.7) - y_hat = torch.clamp(y_hat, -1, 1) - self.pspex.eval() - return self.tensor2np(y_hat[0]) - - def process_m2f(self, input_image: str, input_type: str, seed: int) -> list[np.ndarray]: - #false_image = np.zeros((256,256,3), np.uint8) - if input_image is None: - raise gr.Error('Error: fail to load empty file.' ) - #return [false_image, false_image], 'Error: fail to load empty file.' - task_name = 'mask2face' - with torch.no_grad(): - if input_type == 'parsing mask': - x1 = PIL.Image.open(input_image).getchannel(0) # uploaded files will be transformed to 3-channel RGB image! - x1 = augmentations.ToOneHot(19)(x1) - x1 = transforms.ToTensor()(x1).unsqueeze(dim=0).float().to(self.device) - #print(x1.shape) - else: - frame = cv2.imread(input_image) - if frame is None: - #return [false_image, false_image], 'Error: fail to load the image.' - raise gr.Error('Error: fail to load the image.' ) - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - #return [false_image, false_image], 'Error: no face detected! Please retry or change the photo.' 
- raise gr.Error(self.error_info) - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - # convert face image to segmentation mask - x1 = self.to_tensor(frame).unsqueeze(0).to(self.device) - # upsample image for precise segmentation - x1 = F.interpolate(x1, scale_factor=2, mode='bilinear') - x1 = self.maskpredictor(x1)[0] - x1 = F.interpolate(x1, scale_factor=0.5).argmax(dim=1) - x1 = F.one_hot(x1, num_classes=19).permute(0, 3, 1, 2).float().to(self.device) - - if x1.shape[2] > 513: - x1 = x1[:,:,(x1.shape[2]//2-256)//8*8:(x1.shape[2]//2+256)//8*8] - if x1.shape[3] > 513: - x1 = x1[:,:,:,(x1.shape[3]//2-256)//8*8:(x1.shape[3]//2+256)//8*8] - - x1_viz = (tensor2label(x1[0], 19) / 192 * 256).astype(np.uint8) - - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - self.pspex.train() - torch.manual_seed(seed) - y_hat = self.pspex(x1=x1, resize=False, latent_mask=[8,9,10,11,12,13,14,15,16,17], use_skip=self.pspex.opts.use_skip, - inject_latent= self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,18,1) * 0.7) - y_hat = torch.clamp(y_hat, -1, 1) - self.pspex.eval() - return [x1_viz, self.tensor2np(y_hat[0])] - - - def process_editing(self, input_image: str, scale_factor: float, model_type: str) -> np.ndarray: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the photo.' - - if input_image is None: - #return false_image, false_image, 'Error: fail to load empty file.' - raise gr.Error('Error: fail to load empty file.') - frame = cv2.imread(input_image) - if frame is None: - #return false_image, false_image, 'Error: fail to load the image.' - raise gr.Error('Error: fail to load the image.') - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if model_type is None or model_type == 'reduce age': - task_name = 'edit_age' - else: - task_name = 'edit_hair' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - #return false_image, false_image, info - raise gr.Error(self.error_info) - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - #return false_image, 'Error: no face detected! Please retry or change the photo.' - raise gr.Error(self.error_info) - x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, - resize=False, editing_w= - scale_factor* self.editing_w[0:1]) - y_hat = torch.clamp(y_hat, -1, 1) - - return self.tensor2np(y_hat[0]) - - def process_vediting(self, input_video: str, scale_factor: float, model_type: str, frame_num: int) -> tuple[list[np.ndarray], str]: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the video.' - - if input_video is None: - #return [false_image], 'default.mp4', 'Error: fail to load empty file.' 
- raise gr.Error('Error: fail to load empty file.') - video_cap = cv2.VideoCapture(input_video) - success, frame = video_cap.read() - if success is False: - #return [false_image], 'default.mp4', 'Error: fail to load the video.' - raise gr.Error('Error: fail to load the video.') - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if model_type is None or model_type == 'reduce age': - task_name = 'edit_age' - else: - task_name = 'edit_hair' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - #return [false_image], 'default.mp4', info - raise gr.Error(self.error_info) - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - #return [false_image], 'default.mp4', info - raise gr.Error(self.error_info) - x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('first frame loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - videoWriter = cv2.VideoWriter('output.mp4', fourcc, video_cap.get(5), (4*W, 4*H)) - - viz_frames = [] - for i in range(frame_num): - if i > 0: - success, frame = video_cap.read() - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, - resize=False, editing_w= - scale_factor * self.editing_w[0:1]) - y_hat = torch.clamp(y_hat, -1, 1) - videoWriter.write(tensor2cv2(y_hat[0].cpu())) - if i < min(frame_num, 4): - viz_frames += [self.tensor2np(y_hat[0])] - - videoWriter.release() - - return viz_frames, 'output.mp4' - - - def process_toonify(self, input_image: str, style_type: str) -> np.ndarray: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the photo.' - - if input_image is None: - raise gr.Error('Error: fail to load empty file.') - #return false_image, false_image, 'Error: fail to load empty file.' - frame = cv2.imread(input_image) - if frame is None: - raise gr.Error('Error: fail to load the image.') - #return false_image, false_image, 'Error: fail to load the image.' - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if style_type is None or style_type == 'Pixar': - task_name = 'toonify_pixar' - elif style_type == 'Cartoon': - task_name = 'toonify_cartoon' - else: - task_name = 'toonify_arcane' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - raise gr.Error(self.error_info) - #return false_image, false_image, info - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - raise gr.Error(self.error_info) - #return false_image, 'Error: no face detected! Please retry or change the photo.' 
- x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, resize=False) - y_hat = torch.clamp(y_hat, -1, 1) - - return self.tensor2np(y_hat[0]) - - - def process_vtoonify(self, input_video: str, style_type: str, frame_num: int) -> tuple[list[np.ndarray], str]: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the video.' - - if input_video is None: - raise gr.Error('Error: fail to load empty file.') - #return [false_image], 'default.mp4', 'Error: fail to load empty file.' - video_cap = cv2.VideoCapture(input_video) - success, frame = video_cap.read() - if success is False: - raise gr.Error('Error: fail to load the video.') - #return [false_image], 'default.mp4', 'Error: fail to load the video.' - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if style_type is None or style_type == 'Pixar': - task_name = 'toonify_pixar' - elif style_type == 'Cartoon': - task_name = 'toonify_cartoon' - else: - task_name = 'toonify_arcane' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - raise gr.Error(self.error_info) - #return [false_image], 'default.mp4', info - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - raise gr.Error(self.error_info) - #return [false_image], 'default.mp4', info - x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('first frame loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - videoWriter = cv2.VideoWriter('output.mp4', fourcc, video_cap.get(5), (4*W, 4*H)) - - viz_frames = [] - for i in range(frame_num): - if i > 0: - success, frame = video_cap.read() - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, resize=False) - y_hat = torch.clamp(y_hat, -1, 1) - videoWriter.write(tensor2cv2(y_hat[0].cpu())) - if i < min(frame_num, 4): - viz_frames += [self.tensor2np(y_hat[0])] - - videoWriter.release() - - return viz_frames, 'output.mp4' - - - def process_inversion(self, input_image: str, optimize: str, input_latent: file-object, editing_options: str, - scale_factor: float, seed: int) -> tuple[np.ndarray, np.ndarray]: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the photo.' - - if input_image is None: - raise gr.Error('Error: fail to load empty file.') - #return false_image, false_image, 'Error: fail to load empty file.' - frame = cv2.imread(input_image) - if frame is None: - raise gr.Error('Error: fail to load the image.') - #return false_image, false_image, 'Error: fail to load the image.' 
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - task_name = 'inversion' - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - if input_latent is not None: - if '.pt' not in input_latent.name: - raise gr.Error('Error: the latent format is wrong') - #return false_image, false_image, 'Error: the latent format is wrong' - latents = torch.load(input_latent.name) - if 'wplus' not in latents.keys() or 'f' not in latents.keys(): - raise gr.Error('Error: the latent format is wrong') - #return false_image, false_image, 'Error: the latent format is wrong' - wplus = latents['wplus'].to(self.device) # w+ - f = [latents['f'][0].to(self.device)] # f - elif optimize == 'Latent optimization': - wplus, f, _, _, _ = latent_optimization(frame, self.pspex, self.landmarkpredictor, - step=500, device=self.device) - else: - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - raise gr.Error(self.error_info) - #return false_image, false_image, info - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - raise gr.Error(self.error_info) - #return false_image, false_image, 'Error: no face detected! Please retry or change the photo.' - x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('image loaded') - wplus = self.pspex.encoder(x2) + self.pspex.latent_avg.unsqueeze(0) - _, f = self.pspex.encoder(x1, return_feat=True) - - with torch.no_grad(): - y_hat, _ = self.pspex.decoder([wplus], input_is_latent=True, first_layer_feature=f) - y_hat = torch.clamp(y_hat, -1, 1) - - if 'Style Mixing' in editing_options: - torch.manual_seed(seed) - wplus[:, 8:] = self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,10,1) * 0.7 - y_hat_edit, _ = self.pspex.decoder([wplus], input_is_latent=True, first_layer_feature=f) - elif 'Attribute Editing' in editing_options: - editing_w = self.editing_dicts[editing_options[19:]].to(self.device) - y_hat_edit, _ = self.pspex.decoder([wplus+scale_factor*editing_w], input_is_latent=True, first_layer_feature=f) - elif 'Domain Transfer' in editing_options: - self.load_G_model(editing_options[17:]) - if self.print_log: print('model %s loaded'%(editing_options[17:])) - y_hat_edit, _ = self.generator([wplus], input_is_latent=True, first_layer_feature=f) - else: - y_hat_edit = y_hat - y_hat_edit = torch.clamp(y_hat_edit, -1, 1) - - return self.tensor2np(y_hat[0]), self.tensor2np(y_hat_edit[0]) \ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/svs/diffsinger_task.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/svs/diffsinger_task.py deleted file mode 100644 index 78e65447fe78ca80bd0e67ebe5581794fc3764aa..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/svs/diffsinger_task.py +++ /dev/null @@ -1,490 +0,0 @@ -import torch - -import utils -from utils.hparams import hparams -from modules.diff.net import DiffNet -from modules.diff.shallow_diffusion_tts import GaussianDiffusion, OfflineGaussianDiffusion -from tasks.svs.diffspeech_task import DiffSpeechTask -from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder -from modules.fastspeech.pe import PitchExtractor -from modules.fastspeech.fs2 import FastSpeech2 -from modules.diffsinger_midi.fs2 import FastSpeech2MIDI 
-from modules.fastspeech.tts_modules import mel2ph_to_dur - -from modules.diff.candidate_decoder import FFT -from utils.pitch_utils import denorm_f0 -from tasks.tts.fs2_utils import FastSpeechDataset -from tasks.tts.fs2 import FastSpeech2Task - -import numpy as np -import os -import torch.nn.functional as F - -DIFF_DECODERS = { - 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']), - 'fft': lambda hp: FFT( - hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']), -} - - -class DiffSingerTask(DiffSpeechTask): - def __init__(self): - super(DiffSingerTask, self).__init__() - self.dataset_cls = FastSpeechDataset - self.vocoder: BaseVocoder = get_vocoder_cls(hparams)() - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - self.pe = PitchExtractor().cuda() - utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True) - self.pe.eval() - - def build_tts_model(self): - # import torch - # from tqdm import tqdm - # v_min = torch.ones([80]) * 100 - # v_max = torch.ones([80]) * -100 - # for i, ds in enumerate(tqdm(self.dataset_cls('train'))): - # v_max = torch.max(torch.max(ds['mel'].reshape(-1, 80), 0)[0], v_max) - # v_min = torch.min(torch.min(ds['mel'].reshape(-1, 80), 0)[0], v_min) - # if i % 100 == 0: - # print(i, v_min, v_max) - # print('final', v_min, v_max) - mel_bins = hparams['audio_num_mel_bins'] - self.model = GaussianDiffusion( - phone_encoder=self.phone_encoder, - out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), - timesteps=hparams['timesteps'], - K_step=hparams['K_step'], - loss_type=hparams['diff_loss_type'], - spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], - ) - if hparams['fs2_ckpt'] != '': - utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True) - # self.model.fs2.decoder = None - for k, v in self.model.fs2.named_parameters(): - v.requires_grad = False - - def validation_step(self, sample, batch_idx): - outputs = {} - txt_tokens = sample['txt_tokens'] # [B, T_t] - - target = sample['mels'] # [B, T_s, 80] - energy = sample['energy'] - # fs2_mel = sample['fs2_mels'] - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - mel2ph = sample['mel2ph'] - f0 = sample['f0'] - uv = sample['uv'] - - outputs['losses'] = {} - - outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) - - - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - outputs = utils.tensors_to_scalars(outputs) - if batch_idx < hparams['num_valid_plots']: - model_out = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True) - - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel - pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel - else: - gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) - pred_f0 = model_out.get('f0_denorm') - self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}') - self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}') - return outputs - - -class ShallowDiffusionOfflineDataset(FastSpeechDataset): - def __getitem__(self, index): - sample = super(ShallowDiffusionOfflineDataset, 
self).__getitem__(index) - item = self._get_item(index) - - if self.prefix != 'train' and hparams['fs2_ckpt'] != '': - fs2_ckpt = os.path.dirname(hparams['fs2_ckpt']) - item_name = item['item_name'] - fs2_mel = torch.Tensor(np.load(f'{fs2_ckpt}/P_mels_npy/{item_name}.npy')) # ~M generated by FFT-singer. - sample['fs2_mel'] = fs2_mel - return sample - - def collater(self, samples): - batch = super(ShallowDiffusionOfflineDataset, self).collater(samples) - if self.prefix != 'train' and hparams['fs2_ckpt'] != '': - batch['fs2_mels'] = utils.collate_2d([s['fs2_mel'] for s in samples], 0.0) - return batch - - -class DiffSingerOfflineTask(DiffSingerTask): - def __init__(self): - super(DiffSingerOfflineTask, self).__init__() - self.dataset_cls = ShallowDiffusionOfflineDataset - - def build_tts_model(self): - mel_bins = hparams['audio_num_mel_bins'] - self.model = OfflineGaussianDiffusion( - phone_encoder=self.phone_encoder, - out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), - timesteps=hparams['timesteps'], - K_step=hparams['K_step'], - loss_type=hparams['diff_loss_type'], - spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], - ) - # if hparams['fs2_ckpt'] != '': - # utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True) - # self.model.fs2.decoder = None - - def run_model(self, model, sample, return_output=False, infer=False): - txt_tokens = sample['txt_tokens'] # [B, T_t] - target = sample['mels'] # [B, T_s, 80] - mel2ph = sample['mel2ph'] # [B, T_s] - f0 = sample['f0'] - uv = sample['uv'] - energy = sample['energy'] - fs2_mel = None #sample['fs2_mels'] - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - if hparams['pitch_type'] == 'cwt': - cwt_spec = sample[f'cwt_spec'] - f0_mean = sample['f0_mean'] - f0_std = sample['f0_std'] - sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) - - output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, - ref_mels=[target, fs2_mel], f0=f0, uv=uv, energy=energy, infer=infer) - - losses = {} - if 'diff_loss' in output: - losses['mel'] = output['diff_loss'] - # self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) - # if hparams['use_pitch_embed']: - # self.add_pitch_loss(output, sample, losses) - if hparams['use_energy_embed']: - self.add_energy_loss(output['energy_pred'], energy, losses) - - if not return_output: - return losses - else: - return losses, output - - def validation_step(self, sample, batch_idx): - outputs = {} - txt_tokens = sample['txt_tokens'] # [B, T_t] - - target = sample['mels'] # [B, T_s, 80] - energy = sample['energy'] - # fs2_mel = sample['fs2_mels'] - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - mel2ph = sample['mel2ph'] - f0 = sample['f0'] - uv = sample['uv'] - - outputs['losses'] = {} - - outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) - - - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - outputs = utils.tensors_to_scalars(outputs) - if batch_idx < hparams['num_valid_plots']: - fs2_mel = sample['fs2_mels'] - model_out = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, - ref_mels=[None, fs2_mel], infer=True) - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel - pred_f0 = 
self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel - else: - gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) - pred_f0 = model_out.get('f0_denorm') - self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}') - self.plot_mel(batch_idx, sample['mels'], fs2_mel, name=f'fs2mel_{batch_idx}') - return outputs - - def test_step(self, sample, batch_idx): - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - txt_tokens = sample['txt_tokens'] - energy = sample['energy'] - if hparams['profile_infer']: - pass - else: - mel2ph, uv, f0 = None, None, None - if hparams['use_gt_dur']: - mel2ph = sample['mel2ph'] - if hparams['use_gt_f0']: - f0 = sample['f0'] - uv = sample['uv'] - fs2_mel = sample['fs2_mels'] - outputs = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=[None, fs2_mel], energy=energy, - infer=True) - sample['outputs'] = self.model.out2mel(outputs['mel_out']) - sample['mel2ph_pred'] = outputs['mel2ph'] - - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel - sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred'] # pe predict from Pred mel - else: - sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams) - sample['f0_pred'] = outputs.get('f0_denorm') - return self.after_infer(sample) - - -class MIDIDataset(FastSpeechDataset): - def __getitem__(self, index): - sample = super(MIDIDataset, self).__getitem__(index) - item = self._get_item(index) - sample['f0_midi'] = torch.FloatTensor(item['f0_midi']) - sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']] - - return sample - - def collater(self, samples): - batch = super(MIDIDataset, self).collater(samples) - batch['f0_midi'] = utils.collate_1d([s['f0_midi'] for s in samples], 0.0) - batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0) - # print((batch['pitch_midi'] == f0_to_coarse(batch['f0_midi'])).all()) - return batch - - -class OpencpopDataset(FastSpeechDataset): - def __getitem__(self, index): - sample = super(OpencpopDataset, self).__getitem__(index) - item = self._get_item(index) - sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']] - sample['midi_dur'] = torch.FloatTensor(item['midi_dur'])[:hparams['max_frames']] - sample['is_slur'] = torch.LongTensor(item['is_slur'])[:hparams['max_frames']] - sample['word_boundary'] = torch.LongTensor(item['word_boundary'])[:hparams['max_frames']] - return sample - - def collater(self, samples): - batch = super(OpencpopDataset, self).collater(samples) - batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0) - batch['midi_dur'] = utils.collate_1d([s['midi_dur'] for s in samples], 0) - batch['is_slur'] = utils.collate_1d([s['is_slur'] for s in samples], 0) - batch['word_boundary'] = utils.collate_1d([s['word_boundary'] for s in samples], 0) - return batch - - -class DiffSingerMIDITask(DiffSingerTask): - def __init__(self): - super(DiffSingerMIDITask, self).__init__() - # self.dataset_cls = MIDIDataset - self.dataset_cls = OpencpopDataset - - def run_model(self, model, sample, return_output=False, infer=False): - txt_tokens = sample['txt_tokens'] # [B, T_t] - target = sample['mels'] # [B, T_s, 80] - # mel2ph = sample['mel2ph'] if 
hparams['use_gt_dur'] else None # [B, T_s] - mel2ph = sample['mel2ph'] - if hparams.get('switch_midi2f0_step') is not None and self.global_step > hparams['switch_midi2f0_step']: - f0 = None - uv = None - else: - f0 = sample['f0'] - uv = sample['uv'] - energy = sample['energy'] - - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - if hparams['pitch_type'] == 'cwt': - cwt_spec = sample[f'cwt_spec'] - f0_mean = sample['f0_mean'] - f0_std = sample['f0_std'] - sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) - - output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, - ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer, pitch_midi=sample['pitch_midi'], - midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) - - losses = {} - if 'diff_loss' in output: - losses['mel'] = output['diff_loss'] - self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses) - if hparams['use_pitch_embed']: - self.add_pitch_loss(output, sample, losses) - if hparams['use_energy_embed']: - self.add_energy_loss(output['energy_pred'], energy, losses) - if not return_output: - return losses - else: - return losses, output - - def validation_step(self, sample, batch_idx): - outputs = {} - txt_tokens = sample['txt_tokens'] # [B, T_t] - - target = sample['mels'] # [B, T_s, 80] - energy = sample['energy'] - # fs2_mel = sample['fs2_mels'] - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - mel2ph = sample['mel2ph'] - - outputs['losses'] = {} - - outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) - - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - outputs = utils.tensors_to_scalars(outputs) - if batch_idx < hparams['num_valid_plots']: - model_out = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=None, uv=None, energy=energy, ref_mels=None, infer=True, - pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) - - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel - pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel - else: - gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) - pred_f0 = model_out.get('f0_denorm') - self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}') - self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}') - if hparams['use_pitch_embed']: - self.plot_pitch(batch_idx, sample, model_out) - return outputs - - def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None): - """ - :param dur_pred: [B, T], float, log scale - :param mel2ph: [B, T] - :param txt_tokens: [B, T] - :param losses: - :return: - """ - B, T = txt_tokens.shape - nonpadding = (txt_tokens != 0).float() - dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding - is_sil = torch.zeros_like(txt_tokens).bool() - for p in self.sil_ph: - is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0]) - is_sil = is_sil.float() # [B, T_txt] - - # phone duration loss - if hparams['dur_loss'] == 'mse': - losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none') - losses['pdur'] = (losses['pdur'] * 
nonpadding).sum() / nonpadding.sum() - dur_pred = (dur_pred.exp() - 1).clamp(min=0) - else: - raise NotImplementedError - - # use linear scale for sent and word duration - if hparams['lambda_word_dur'] > 0: - idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1] - # word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur - word_dur_p = dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred) - word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt) - wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none') - word_nonpadding = (word_dur_g > 0).float() - wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum() - losses['wdur'] = wdur_loss * hparams['lambda_word_dur'] - if hparams['lambda_sent_dur'] > 0: - sent_dur_p = dur_pred.sum(-1) - sent_dur_g = dur_gt.sum(-1) - sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean') - losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] - - -class AuxDecoderMIDITask(FastSpeech2Task): - def __init__(self): - super().__init__() - # self.dataset_cls = MIDIDataset - self.dataset_cls = OpencpopDataset - - def build_tts_model(self): - if hparams.get('use_midi') is not None and hparams['use_midi']: - self.model = FastSpeech2MIDI(self.phone_encoder) - else: - self.model = FastSpeech2(self.phone_encoder) - - def run_model(self, model, sample, return_output=False): - txt_tokens = sample['txt_tokens'] # [B, T_t] - target = sample['mels'] # [B, T_s, 80] - mel2ph = sample['mel2ph'] # [B, T_s] - f0 = sample['f0'] - uv = sample['uv'] - energy = sample['energy'] - - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - if hparams['pitch_type'] == 'cwt': - cwt_spec = sample[f'cwt_spec'] - f0_mean = sample['f0_mean'] - f0_std = sample['f0_std'] - sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) - - output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, - ref_mels=target, f0=f0, uv=uv, energy=energy, infer=False, pitch_midi=sample['pitch_midi'], - midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) - - losses = {} - self.add_mel_loss(output['mel_out'], target, losses) - self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses) - if hparams['use_pitch_embed']: - self.add_pitch_loss(output, sample, losses) - if hparams['use_energy_embed']: - self.add_energy_loss(output['energy_pred'], energy, losses) - if not return_output: - return losses - else: - return losses, output - - def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None): - """ - :param dur_pred: [B, T], float, log scale - :param mel2ph: [B, T] - :param txt_tokens: [B, T] - :param losses: - :return: - """ - B, T = txt_tokens.shape - nonpadding = (txt_tokens != 0).float() - dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding - is_sil = torch.zeros_like(txt_tokens).bool() - for p in self.sil_ph: - is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0]) - is_sil = is_sil.float() # [B, T_txt] - - # phone duration loss - if hparams['dur_loss'] == 'mse': - losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none') - losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum() - dur_pred = (dur_pred.exp() - 1).clamp(min=0) - else: - raise NotImplementedError - - # use linear scale for sent and word duration - if hparams['lambda_word_dur'] > 0: - idx = 
F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1] - # word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur - word_dur_p = dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred) - word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt) - wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none') - word_nonpadding = (word_dur_g > 0).float() - wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum() - losses['wdur'] = wdur_loss * hparams['lambda_word_dur'] - if hparams['lambda_sent_dur'] > 0: - sent_dur_p = dur_pred.sum(-1) - sent_dur_g = dur_gt.sum(-1) - sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean') - losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] - - def validation_step(self, sample, batch_idx): - outputs = {} - outputs['losses'] = {} - outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True) - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - mel_out = self.model.out2mel(model_out['mel_out']) - outputs = utils.tensors_to_scalars(outputs) - # if sample['mels'].shape[0] == 1: - # self.add_laplace_var(mel_out, sample['mels'], outputs) - if batch_idx < hparams['num_valid_plots']: - self.plot_mel(batch_idx, sample['mels'], mel_out) - self.plot_dur(batch_idx, sample, model_out) - if hparams['use_pitch_embed']: - self.plot_pitch(batch_idx, sample, model_out) - return outputs \ No newline at end of file diff --git a/spaces/AIWaves/Debate/README.md b/spaces/AIWaves/Debate/README.md deleted file mode 100644 index da0d4912c994bd7f03ee141e9477412b145001aa..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/Debate/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Debate -emoji: 🐠 -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIZero2HeroBootcamp/ChatGPTandLangchain/templates.py b/spaces/AIZero2HeroBootcamp/ChatGPTandLangchain/templates.py deleted file mode 100644 index 2c64194b42f0115f8a95b2749256a3237ab44757..0000000000000000000000000000000000000000 --- a/spaces/AIZero2HeroBootcamp/ChatGPTandLangchain/templates.py +++ /dev/null @@ -1,44 +0,0 @@ -css = ''' - - - -

Yunzai Cloud Media Server

-

This service currently supports audio transcoding, web page screenshots, and URL accessibility checks

-

Audio Transcoding

-

Description

-

Converts an audio link, raw audio data, or audio file into SILK-format data; audio files can be sent directly to this endpoint

-

Request Details

-

Request method: POST
-Request URL: /audio

-
-

Request Parameters

- - - - - - - - - - -
Field | Type | Description
recordUrl | string | URL of the source audio
recordBuffer | object | Raw source audio data
recordBuffer.type | string | Data type
recordBuffer.data | array | Data
-
-
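As a rough usage sketch (not part of the original service docs): the request body only needs the fields from the table above. The host and port, the sample file name, and the "Buffer" type tag are all assumptions here, the last one mirroring how a Node.js Buffer serializes to JSON.

```python
# Hypothetical client for the /audio endpoint; host, port and file names are assumptions.
import requests

BASE = "http://localhost:3000"  # assumed deployment address

# Variant 1: let the server fetch the source audio from a URL.
resp = requests.post(f"{BASE}/audio",
                     json={"recordUrl": "https://example.com/voice.mp3"})

# Variant 2: send the raw audio bytes as a recordBuffer object
# ("Buffer" as the type tag is an assumption, not confirmed above).
with open("voice.mp3", "rb") as f:
    payload = {"recordBuffer": {"type": "Buffer", "data": list(f.read())}}
resp = requests.post(f"{BASE}/audio", json=payload)

silk_data = resp.content  # SILK-format data returned by the service
```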

Web Page Screenshot

-

Description

-

Renders the given web page and returns a screenshot, either as base64 data or as an image file

-

Request Details

-

Request method: POST
-Request URL: /screenshot

-
-

Request Parameters

- - - - - - - - - - - - - - - - - -
Field | Type | Description
url | string | URL of the page to request
option | object | Rendering options
option.width | int | Render viewport width
option.height | int | Render viewport height
option.dpr | int | Render device pixel ratio (DPR)
option.timeout | int | Access timeout
option.wait | int | Page wait time
option.waitUtil | string ('load', 'domcontentloaded', 'networkidle0', 'networkidle2') | waitUtil parameter
option.func | int | waitFunction parameter
option.selector | string | Selector that marks the page as fully loaded
type | string | Return type, either base64 or image
-
-
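A screenshot request can be assembled the same way. This is again a minimal sketch under assumptions: the host and port are hypothetical, the option values are arbitrary examples, and whether the base64 string comes back directly in the response body is an assumption as well.

```python
# Hypothetical client for the /screenshot endpoint; host, port and values are assumptions.
import base64
import requests

payload = {
    "url": "https://example.com",    # page to capture (example value)
    "option": {
        "width": 1280,               # render viewport width
        "height": 720,               # render viewport height
        "dpr": 2,                    # device pixel ratio
        "timeout": 30000,            # access timeout
        "waitUtil": "networkidle0",  # wait until the network goes idle
    },
    "type": "base64",                # request a base64-encoded image
}

resp = requests.post("http://localhost:3000/screenshot", json=payload)

# Assuming the body holds the base64 string, decode and save it to disk.
with open("screenshot.png", "wb") as f:
    f.write(base64.b64decode(resp.text))
```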

URL Accessibility Check

-

Description

-

Checks whether a URL can be accessed normally

-

Request Details

-

Request method: POST
-Request URL: /check

-
-

Request Parameters

- - - - - - - -
Field | Type | Description
url | string | URL to check
- - - - \ No newline at end of file diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_head.py deleted file mode 100644 index b12416fa8332f02b9a04bbfc7926f6d13875e61b..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_head.py +++ /dev/null @@ -1,114 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init - -from ..builder import HEADS -from .anchor_head import AnchorHead - - -@HEADS.register_module() -class RetinaHead(AnchorHead): - r"""An anchor-based head used in `RetinaNet - `_. - - The head contains two subnetworks. The first classifies anchor boxes and - the second regresses deltas for the anchors. - - Example: - >>> import torch - >>> self = RetinaHead(11, 7) - >>> x = torch.rand(1, 7, 32, 32) - >>> cls_score, bbox_pred = self.forward_single(x) - >>> # Each anchor predicts a score for each class except background - >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors - >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors - >>> assert cls_per_anchor == (self.num_classes) - >>> assert box_per_anchor == 4 - """ - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - conv_cfg=None, - norm_cfg=None, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - **kwargs): - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - super(RetinaHead, self).__init__( - num_classes, - in_channels, - anchor_generator=anchor_generator, - **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.retina_cls = nn.Conv2d( - self.feat_channels, - self.num_anchors * self.cls_out_channels, - 3, - padding=1) - self.retina_reg = nn.Conv2d( - self.feat_channels, self.num_anchors * 4, 3, padding=1) - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.cls_convs: - normal_init(m.conv, std=0.01) - for m in self.reg_convs: - normal_init(m.conv, std=0.01) - bias_cls = bias_init_with_prob(0.01) - normal_init(self.retina_cls, std=0.01, bias=bias_cls) - normal_init(self.retina_reg, std=0.01) - - def forward_single(self, x): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - - Returns: - tuple: - cls_score (Tensor): Cls scores for a single scale level - the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale - level, the channels number is num_anchors * 4. 
- """ - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.retina_cls(cls_feat) - bbox_pred = self.retina_reg(reg_feat) - return cls_score, bbox_pred diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/sparse_rcnn.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/sparse_rcnn.py deleted file mode 100644 index 0dbd0250f189e610a0bbc72b0dab2559e26857ae..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/sparse_rcnn.py +++ /dev/null @@ -1,110 +0,0 @@ -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class SparseRCNN(TwoStageDetector): - r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with - Learnable Proposals `_""" - - def __init__(self, *args, **kwargs): - super(SparseRCNN, self).__init__(*args, **kwargs) - assert self.with_rpn, 'Sparse R-CNN do not support external proposals' - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - proposals=None, - **kwargs): - """Forward function of SparseR-CNN in train stage. - - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor): specify which bounding - boxes can be ignored when computing the loss. - gt_masks (List[Tensor], optional) : Segmentation masks for - each box. But we don't support it in this architecture. - proposals (List[Tensor], optional): override rpn proposals with - custom proposals. Use when `with_rpn` is False. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - - assert proposals is None, 'Sparse R-CNN does not support' \ - ' external proposals' - assert gt_masks is None, 'Sparse R-CNN does not instance segmentation' - - x = self.extract_feat(img) - proposal_boxes, proposal_features, imgs_whwh = \ - self.rpn_head.forward_train(x, img_metas) - roi_losses = self.roi_head.forward_train( - x, - proposal_boxes, - proposal_features, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=gt_bboxes_ignore, - gt_masks=gt_masks, - imgs_whwh=imgs_whwh) - return roi_losses - - def simple_test(self, img, img_metas, rescale=False): - """Test function without test time augmentation. - - Args: - imgs (list[torch.Tensor]): List of multiple images - img_metas (list[dict]): List of image information. - rescale (bool): Whether to rescale the results. - Defaults to False. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. 
- """ - x = self.extract_feat(img) - proposal_boxes, proposal_features, imgs_whwh = \ - self.rpn_head.simple_test_rpn(x, img_metas) - bbox_results = self.roi_head.simple_test( - x, - proposal_boxes, - proposal_features, - img_metas, - imgs_whwh=imgs_whwh, - rescale=rescale) - return bbox_results - - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - # backbone - x = self.extract_feat(img) - # rpn - num_imgs = len(img) - dummy_img_metas = [ - dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs) - ] - proposal_boxes, proposal_features, imgs_whwh = \ - self.rpn_head.simple_test_rpn(x, dummy_img_metas) - # roi_head - roi_outs = self.roi_head.forward_dummy(x, proposal_boxes, - proposal_features, - dummy_img_metas) - return roi_outs diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index d311e33f56ba431a882b0e7079001b0e9932a011..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/spaces/AnnasBlackHat/Image-Similarity/README.md b/spaces/AnnasBlackHat/Image-Similarity/README.md deleted file mode 100644 index 3f48c594dc5e31a28af3221e6a1f2b8f45b5bf13..0000000000000000000000000000000000000000 --- a/spaces/AnnasBlackHat/Image-Similarity/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Image Similarity -emoji: 🐨 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.16.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Anonymous-sub/Rerender/src/config.py b/spaces/Anonymous-sub/Rerender/src/config.py deleted file mode 100644 index 5f06d1a678405ec0d52dd9e3a86d268a304a80a8..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/src/config.py +++ /dev/null @@ -1,144 +0,0 @@ -import json -import os -from typing import Optional, Sequence, Tuple - -from src.video_util import get_frame_count - - -class RerenderConfig: - - def __init__(self): - ... 
- - def create_from_parameters(self, - input_path: str, - output_path: str, - prompt: str, - work_dir: Optional[str] = None, - key_subdir: str = 'keys', - frame_count: Optional[int] = None, - interval: int = 10, - crop: Sequence[int] = (0, 0, 0, 0), - sd_model: Optional[str] = None, - a_prompt: str = '', - n_prompt: str = '', - ddim_steps=20, - scale=7.5, - control_type: str = 'HED', - control_strength=1, - seed: int = -1, - image_resolution: int = 512, - x0_strength: float = -1, - style_update_freq: int = 10, - cross_period: Tuple[float, float] = (0, 1), - warp_period: Tuple[float, float] = (0, 0.1), - mask_period: Tuple[float, float] = (0.5, 0.8), - ada_period: Tuple[float, float] = (1.0, 1.0), - mask_strength: float = 0.5, - inner_strength: float = 0.9, - smooth_boundary: bool = True, - color_preserve: bool = True, - **kwargs): - self.input_path = input_path - self.output_path = output_path - self.prompt = prompt - self.work_dir = work_dir - if work_dir is None: - self.work_dir = os.path.dirname(output_path) - self.key_dir = os.path.join(self.work_dir, key_subdir) - self.first_dir = os.path.join(self.work_dir, 'first') - - # Split video into frames - if not os.path.isfile(input_path): - raise FileNotFoundError(f'Cannot find video file {input_path}') - self.input_dir = os.path.join(self.work_dir, 'video') - - self.frame_count = frame_count - if frame_count is None: - self.frame_count = get_frame_count(self.input_path) - self.interval = interval - self.crop = crop - self.sd_model = sd_model - self.a_prompt = a_prompt - self.n_prompt = n_prompt - self.ddim_steps = ddim_steps - self.scale = scale - self.control_type = control_type - if self.control_type == 'canny': - self.canny_low = kwargs.get('canny_low', 100) - self.canny_high = kwargs.get('canny_high', 200) - else: - self.canny_low = None - self.canny_high = None - self.control_strength = control_strength - self.seed = seed - self.image_resolution = image_resolution - self.x0_strength = x0_strength - self.style_update_freq = style_update_freq - self.cross_period = cross_period - self.mask_period = mask_period - self.warp_period = warp_period - self.ada_period = ada_period - self.mask_strength = mask_strength - self.inner_strength = inner_strength - self.smooth_boundary = smooth_boundary - self.color_preserve = color_preserve - - os.makedirs(self.input_dir, exist_ok=True) - os.makedirs(self.work_dir, exist_ok=True) - os.makedirs(self.key_dir, exist_ok=True) - os.makedirs(self.first_dir, exist_ok=True) - - def create_from_path(self, cfg_path: str): - with open(cfg_path, 'r') as fp: - cfg = json.load(fp) - kwargs = dict() - - def append_if_not_none(key): - value = cfg.get(key, None) - if value is not None: - kwargs[key] = value - - kwargs['input_path'] = cfg['input'] - kwargs['output_path'] = cfg['output'] - kwargs['prompt'] = cfg['prompt'] - append_if_not_none('work_dir') - append_if_not_none('key_subdir') - append_if_not_none('frame_count') - append_if_not_none('interval') - append_if_not_none('crop') - append_if_not_none('sd_model') - append_if_not_none('a_prompt') - append_if_not_none('n_prompt') - append_if_not_none('ddim_steps') - append_if_not_none('scale') - append_if_not_none('control_type') - if kwargs.get('control_type', '') == 'canny': - append_if_not_none('canny_low') - append_if_not_none('canny_high') - append_if_not_none('control_strength') - append_if_not_none('seed') - append_if_not_none('image_resolution') - append_if_not_none('x0_strength') - append_if_not_none('style_update_freq') - append_if_not_none('cross_period') - 
append_if_not_none('warp_period') - append_if_not_none('mask_period') - append_if_not_none('ada_period') - append_if_not_none('mask_strength') - append_if_not_none('inner_strength') - append_if_not_none('smooth_boundary') - append_if_not_none('color_perserve') - self.create_from_parameters(**kwargs) - - @property - def use_warp(self): - return self.warp_period[0] <= self.warp_period[1] - - @property - def use_mask(self): - return self.mask_period[0] <= self.mask_period[1] - - @property - def use_ada(self): - return self.ada_period[0] <= self.ada_period[1] diff --git a/spaces/Arvi/Performance_predictor_and_feedback_generator/app.py b/spaces/Arvi/Performance_predictor_and_feedback_generator/app.py deleted file mode 100644 index d382c8cf74e6004ccfd3acb44f1832478396ab37..0000000000000000000000000000000000000000 --- a/spaces/Arvi/Performance_predictor_and_feedback_generator/app.py +++ /dev/null @@ -1,410 +0,0 @@ -def assign_weights(Name,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12,col13,col14,col15): - import gradio as gr - import pandas as pd - import numpy as np - df=pd.read_csv('/content/final_processed.csv') - df.drop(['Unnamed: 0'], axis=1,inplace=True) - from sklearn import preprocessing - label_encoder = preprocessing.LabelEncoder() - - - y={'academic time':col2,'task dedication':col3,'physical activity':col4,'favourite sport':col5,'family time':col6,'poor sleep':col7,'fitness':col8, - 'loss of concentration':col9,'eating habits':col10,'free time':col11,'motivation':col12,'social media':col13,'social media on academics':col14,'performance':col15} - df=df.append(y,ignore_index=True) - - - df['academic time']= label_encoder.fit_transform(df['academic time']) - df['task dedication']= label_encoder.fit_transform(df['task dedication']) - df['physical activity']= label_encoder.fit_transform(df['physical activity']) - df['favorite sport']= label_encoder.fit_transform(df['favorite sport']) - df['family time']= label_encoder.fit_transform(df['family time']) - df['poor sleep']= label_encoder.fit_transform(df['poor sleep']) - df['fitness']= label_encoder.fit_transform(df['fitness']) - df['loss of concentration']= label_encoder.fit_transform(df['loss of concentration']) - df['eating habits']= label_encoder.fit_transform(df['eating habits']) - df['free time']= label_encoder.fit_transform(df['free time']) - df['motivation']= label_encoder.fit_transform(df['motivation']) - df['social media']= label_encoder.fit_transform(df['social media']) - df['socail media on academics']= label_encoder.fit_transform(df['socail media on academics']) - df['performance']= label_encoder.fit_transform(df['performance']) - - df.loc[df['academic time'] == 4, 'weight_academic'] =0.45 - df.loc[df['academic time'] == 1, 'weight_academic'] =0.15 - df.loc[df['academic time'] == 0, 'weight_academic'] =0.05 - df.loc[df['academic time'] == 2, 'weight_academic'] =0.35 - df.loc[df['academic time'] == 3, 'weight_academic'] =0.00 - - df.loc[df['task dedication'] == 0, 'weight_task'] =0.00 - df.loc[df['task dedication'] == 1, 'weight_task'] =0.05 - df.loc[df['task dedication'] == 2, 'weight_task'] =0.20 - df.loc[df['task dedication'] == 3, 'weight_task'] =0.25 - df.loc[df['task dedication'] == 4, 'weight_task'] =0.50 - - df.loc[df['physical activity'] == 0, 'weight_physic'] =0.00 - df.loc[df['physical activity'] == 1, 'weight_physic'] =1.00 - - df.loc[df['favorite sport'] == 0, 'weight_play'] =0.20 - df.loc[df['favorite sport'] == 1, 'weight_play'] =0.20 - df.loc[df['favorite sport'] == 2, 'weight_play'] =0.20 - 
df.loc[df['favorite sport'] == 3, 'weight_play'] =0.20 - df.loc[df['favorite sport'] == 4, 'weight_play'] =0.00 - df.loc[df['favorite sport'] == 5, 'weight_play'] =0.20 - - df.loc[df['family time'] == 3, 'weight_familytime'] =0.40 - df.loc[df['family time'] == 2, 'weight_familytime'] =0.10 - df.loc[df['family time'] == 1, 'weight_familytime'] =0.00 - df.loc[df['family time'] == 0, 'weight_familytime'] =0.40 - df.loc[df['family time'] == 4, 'weight_familytime'] =0.10 - - df.loc[df['poor sleep'] == 4, 'weight_sleep'] =0.00 - df.loc[df['poor sleep'] == 3, 'weight_sleep'] =0.05 - df.loc[df['poor sleep'] == 0, 'weight_sleep'] =0.00 - df.loc[df['poor sleep'] == 2, 'weight_sleep'] =0.40 - df.loc[df['poor sleep'] == 1, 'weight_sleep'] =0.55 - - df.loc[df['loss of concentration'] == 4, 'weight_conc'] =0.20 - df.loc[df['loss of concentration'] == 0, 'weight_conc'] =0.05 - df.loc[df['loss of concentration'] == 1, 'weight_conc'] =0.00 - df.loc[df['loss of concentration'] == 3, 'weight_conc'] =0.75 - df.loc[df['loss of concentration'] == 2, 'weight_conc'] =0.05 - - df.loc[df['eating habits'] == 4, 'weight_eating'] =0.20 - df.loc[df['eating habits'] == 0, 'weight_eating'] =0.05 - df.loc[df['eating habits'] == 1, 'weight_eating'] =0.00 - df.loc[df['eating habits'] == 3, 'weight_eating'] =0.75 - df.loc[df['eating habits'] == 2, 'weight_eating'] =0.05 - - df.loc[df['fitness'] == 2, 'weight_fit'] =0.60 - df.loc[df['fitness'] == 0, 'weight_fit'] =0.10 - df.loc[df['fitness'] == 1, 'weight_fit'] =0.30 - df.loc[df['fitness'] == 3, 'weight_fit'] =0.00 - - df.loc[df['free time'] == 3, 'weight_time'] =0.50 - df.loc[df['free time'] == 2, 'weight_time'] =0.10 - df.loc[df['free time'] == 1, 'weight_time'] =0.20 - df.loc[df['free time'] == 0, 'weight_time'] =0.20 - - df.loc[df['motivation'] == 3, 'weight_motivation'] =0.30 - df.loc[df['motivation'] == 2, 'weight_motivation'] =0.25 - df.loc[df['motivation'] == 1, 'weight_motivation'] =0.25 - df.loc[df['motivation'] == 0, 'weight_motivation'] =0.20 - - df.loc[df['social media'] == 3, 'weight_media'] =0.00 - df.loc[df['social media'] == 2, 'weight_media'] =0.65 - df.loc[df['social media'] == 1, 'weight_media'] =0.10 - df.loc[df['social media'] == 0, 'weight_media'] =0.25 - - - df.loc[df['socail media on academics'] == 0, 'weight_media_academics'] =0.00 - df.loc[df['socail media on academics'] == 1, 'weight_media_academics'] =1.00 - - df.loc[df['performance'] == 4, 'weight_performance']=0.55 - df.loc[df['performance'] == 3, 'weight_performance']=0.00 - df.loc[df['performance'] == 2, 'weight_performance']=0.30 - df.loc[df['performance'] == 1, 'weight_performance']=0.10 - df.loc[df['performance'] == 0, 'weight_performance']=0.05 - - df['total']=df.iloc[:,14:].sum(axis=1) - - - df.loc[(df['weight_academic']<0.35) | (df['weight_task']<0.25),'academic value']=0 - df.loc[(df['weight_academic']>=0.35) & (df['weight_task']>=0.25),'academic value']=1 - df.inplace=1 - - df.loc[(df['weight_academic']<0.35) | (df['weight_time']<0.20),'time value']=0 - df.loc[(df['weight_academic']>=0.35) & (df['weight_time']>=0.20),'time value']=1 - df.inplace=1 - - df.loc[((df['weight_academic']<=0.35) & (df['weight_conc']>=0.20)) | ((df['weight_academic']>=0.35) & (df['weight_conc']>=0.20)),'productive value']=1 - df.loc[((df['weight_academic']>=0.35) & (df['weight_conc']<0.20)) | ((df['weight_academic']<0.35) & (df['weight_conc']<0.20)),'productive value']=0 - df.inplace=1 - - df.loc[(df['weight_physic']==1) & (df['weight_play']==0.2) & (df['weight_fit']>=0.3) & 
df.loc[(df['weight_physic']==1) & (df['weight_play']==0.2) & (df['weight_fit']>=0.3) & (df['weight_eating']>=0.20),'fitness_value']=1 - df.loc[(df['weight_physic']!=1) | (df['weight_play']!=0.2) | (df['weight_fit']<0.3) | (df['weight_eating']<0.20),'fitness_value']=0 - - df.loc[(df['weight_sleep']>=0.40) & (df['weight_conc']>=0.20),'sleep value']=1 - df.loc[(df['weight_sleep']<0.40) | (df['weight_conc']<0.20),'sleep value']=0 - - df.loc[(df['weight_familytime']==0.40) & (df['weight_motivation']==0.25),'motivation value']=1 - df.loc[(df['weight_familytime']!=0.40) | (df['weight_motivation']!=0.25),'motivation value']=0 - - df.loc[(df['weight_performance']>=0.30),'performance_value']=1 - df.loc[(df['weight_performance']<0.30),'performance_value']=0 - - df.loc[(df['weight_media']>=0.25) & (df['weight_media_academics']==0.00),'media_value']=1 - df.loc[(df['weight_media']<0.25) | (df['weight_media_academics']!=0.00),'media_value']=0 - - df.loc[df['total']>=4.0,'overall']=1 - df.loc[df['total']<4.0,'overall']=0 - - X = df[['academic time', - 'task dedication', - 'physical activity', - 'favorite sport', - 'family time', - 'poor sleep', - 'fitness', - 'loss of concentration', - 'eating habits', - 'free time', - 'motivation', - 'social media', - 'social media on academics', - 'performance', - 'weight_academic', - 'weight_task', - 'weight_physic', - 'weight_play', - 'weight_familytime', - 'weight_sleep', - 'weight_conc', - 'weight_eating', - 'weight_fit', - 'weight_time', - 'weight_motivation', - 'weight_media', - 'weight_media_academics', - 'weight_performance', - 'total' - ]] - y1 = df['academic value'] - y2=df['time value'] - y3=df['productive value'] - y4=df['fitness_value'] - y5=df['sleep value'] - y6=df['motivation value'] - y7=df['performance_value'] - y8=df['media_value'] - y9=df['overall'] - from sklearn.model_selection import train_test_split - X_train,X_test,y1_train,y1_test = train_test_split(X,y1,test_size=0.3,random_state = 0,shuffle = True) - X_train,X_test,y2_train,y2_test = train_test_split(X,y2,test_size=0.3,random_state = 0,shuffle = True) - X_train,X_test,y3_train,y3_test = train_test_split(X,y3,test_size=0.3,random_state = 0,shuffle = True) - X_train,X_test,y4_train,y4_test = train_test_split(X,y4,test_size=0.3,random_state = 0,shuffle = True) - X_train,X_test,y5_train,y5_test = train_test_split(X,y5,test_size=0.3,random_state = 0,shuffle = True) - X_train,X_test,y6_train,y6_test = train_test_split(X,y6,test_size=0.3,random_state = 0,shuffle = True) - X_train,X_test,y7_train,y7_test = train_test_split(X,y7,test_size=0.3,random_state = 0,shuffle = True) - X_train,X_test,y8_train,y8_test = train_test_split(X,y8,test_size=0.3,random_state = 0,shuffle = True) - X_train,X_test,y9_train,y9_test = train_test_split(X,y9,test_size=0.3,random_state = 0,shuffle = True) - import xgboost as xgb -
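# 'binary:logistic' is the XGBoost objective for 0/1 targets like those built - # above; 'reg:linear' is a deprecated regression objective and is not meant - # for XGBClassifier. The nine identical models below could also be built in - # one loop to keep their hyperparameters in sync (a sketch, same settings): - # targets = [y1, y2, y3, y4, y5, y6, y7, y8, y9] - # models = [] - # for y in targets: - # Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.3, random_state=0, shuffle=True) - # models.append(xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, - # learning_rate=0.1, max_depth=5, alpha=10, - # n_estimators=10).fit(Xtr, ytr)) -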
rfc1 = xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1, - max_depth=5, alpha=10, n_estimators=10) - rfc2 = xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1, - max_depth=5, alpha=10, n_estimators=10) - rfc3 = xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1, - max_depth=5, alpha=10, n_estimators=10) - rfc4 = xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1, - max_depth=5, alpha=10, n_estimators=10) - rfc5 = xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1, - max_depth=5, alpha=10, n_estimators=10) - rfc6 = xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1, - max_depth=5, alpha=10, n_estimators=10) - rfc7 = xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1, - max_depth=5, alpha=10, n_estimators=10) - rfc8 = xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1, - max_depth=5, alpha=10, n_estimators=10) - rfc9 = xgb.XGBClassifier(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1, - max_depth=5, alpha=10, n_estimators=10) - rfc1.fit(X_train,y1_train) - rfc2.fit(X_train,y2_train) - rfc3.fit(X_train,y3_train) - rfc4.fit(X_train,y4_train) - rfc5.fit(X_train,y5_train) - rfc6.fit(X_train,y6_train) - rfc7.fit(X_train,y7_train) - rfc8.fit(X_train,y8_train) - rfc9.fit(X_train,y9_train) - import random - - z=df.tail(1) - - - - - if z['academic value'].eq(1).all(): - a=['You are on the right track, just try to stick to your schedule','HARRRRRDDDD WORK always pays off, you seem to be going in the right direction', - 'The way is classical!! a tip for you is to listen to some classical music before studying','You are driven by your own interest, keep riding', - 'Your study time is great, now is the time to take a short break', - 'WOWWW you are just a synonym of hard work and dedication'] - res1="feedback on your study schedule --> " +random.choice(a) - if z['academic value'].eq(0).all(): - b=['If you know your “WHY”, finding your “HOW” will not be difficult. You just need to start working','Focusing is about saying no. Just learn to say no to things which distract you. You just need to put a little more focus on your study time', - 'Be the early bird that gets the first worm. Set your body clock and start working','Listen to directions, follow through on assignments, learn for yourself. You just need to enjoy the process', - 'Measure progress, not the time you are working. Try to put in more study time','Postponement will postpone you. Finish your daily tasks when you have the time', - 'You are just off track, there is still time and you will surely reach great heights','you surely have the talent, it is now in your hands to make wonders!!!! talent without hardwork??
what do you think?','enroll yourself in a personalized learning environment which gives you control and a good education experience'] - res1="feedback on your study schedule --> "+random.choice(b) - - - if z['time value'].eq(1).all(): - c=['there is a saying: give me six hours to chop down a tree and I will spend the first sharpening the axe; the fact here is you have sharpened your axe','your timing is great, you are managing time well', - 'it seems you have been studying long, take a quick break and come back','you are enjoying your time, keep putting in the same efforts','keep managing time the way you are doing now, this attribute will take care of the rest' - ,'you seem to stay organized and on track with your proactive planning and systematic scheduling'] - res2="Feedback on how you manage time --> "+random.choice(c) - if z['time value'].eq(0).all(): - d=['you have to start spending time on academics and show some interest in succeeding; you are the pilot who should stop time from flying and bring it under your control','start working, stick to a timetable and set your body clock','try to be more organized and start spending quality time on your studies', - 'start learning to manage time and prioritize your academics','spend more time on your weak areas, try to stretch out for long hours','the biggest obstacle stopping you from winning is time management; prepare a timetable and stick to it', - 'play while you play and work while you work, dont try to mix up things','dont procrastinate, finish your day-to-day jobs when and where you get time'] - res2="Feedback on how you manage time --> "+random.choice(d) - - if z['productive value'].eq(1).all(): - e=['you are smart, productive and have a good way of preparing for your studies','Be more proactive and try to participate in class; you are efficient and can reach heights with your effectiveness','you have the ability to study things smartly and quickly; pick areas which are more brain-storming', - 'you have the ability to interpret things, your mind is sharp and you are a good listener','you are the mastermind, the person who shouldnt miss out on enrolling in IITs, NITs or wherever','you are a productive person; if u feel you are not delivering your 100% its not because you arent studying, its something else'] - res3="Feedback on your productivity --> "+random.choice(e) - if z['productive value'].eq(0).all(): - f=['Try to stick to an approach which is convenient for you, have a clear mind before you start working','start solving more puzzles; a daily sudoku is a good start, you just need to be on your toes and tune your mind to solve various activities','think!think!think analyse where you lack and start building strategies to improve yourself', - 'class participation: its high time you start taking decisions and choose to be proactive','connect everything with what you are learning so that it will stick in your mind and help you recollect when and where you require it','enjoy the process of learning, dont be monotonous and a bookworm; tame your mind to face your challenges','actively consult your instructor to enrich yourself with lots of ways to improve your productivity', - 'rather than a brute-force approach try to think more of an optimal solution to a problem','gather a lot of resources and try to sit at your desk; take short mobile breaks, an online chess game might be an eye opener for your next session'] - res3="Feedback on your productivity --> "+random.choice(f) - - if z['fitness_value'].eq(1).all():
- g=['fitness is your key ,if your body is strong your mind is stronger. Maintaining a good fitness is really important for your health as well as it empowers your learining ',' I can see you have spent time in maintaing your body. Keep winning more golds ','you have choosen to step out of your comfort zone and by trying to put some gains,this will surely be a stepping stone in other important sectors','your fitness level is reasonably good indicating that you are sticking to a schedule kind of person which is really good', - 'you are in a good shape which is a key for self_confidence and gives you a lot of motivation','you are a sportive person ,this will really help you to socialize and gives you a lot of energy to start new things ','you are an open-minded person ,this is really the best character one could ask for,half the problems are over if one is listening and able to make good decisions '] - res4="Feedback on your fitness --> "+random.choice(g) - if z['fitness_value'].eq(0).all(): - h=['A weak body is a liability, you guys being the future generation should definetly be fit and healthy to lead the society at its best','your body should always get the first priority and should be taken care properly', - 'Any physical activity will make you disipline and gives you self confidence. Join your school team today ','out of all a hungry stomach isnt fit for a brisk study session ,being physically fit lets you do more activity even improve your academics ', - 'engage yourself in any physical activity for 20 mins as it can improve your concentration and helps your focus in learning ','out of your busy schedule try devoting just 15 mins get down do some pushups or squats or a brisk jog will do good '] - res4="Feedback on your fitness --> "+random.choice(h) - - if z['sleep value'].eq(1).all(): - i=['Good that you have a proper sleep, just stick to it and try finishing all your work in the day time and get enough rest','Its pretty impressive that you are giving enough importance to your sleep, shows that you have good time management skills and a sweet dream','getting a good sleep even during your stressed timetables shows that you stay at the moment', - 'a good fitness routine followed by a good-sleep is a good sunday schedule and a good starter for a hectic next week which i hope you would have experienced many times','its good that you have a good sleep everynight this is big boost for a bright tomorrow'] - res5="Feedback on your sleep time --> "+random.choice(i) - if z['sleep value'].eq(0).all(): - - j=['The time we sleep is only when we rest our mind, eyes and the whole body which is really crucial for a stduent',' Try not using any devices an hour before you sleep, have a good sleep cycle for atleast 6 to 7 hrs a day','Get enough rest, dont stress your body too much.', - 'Prioritize your sleep, dont have caffinated drinks late in the evening and getting good sleep will make you feel fresh and enegrytic all day long ', - 'a 7 - hour refresh will set your body clock for the rest of your day so please ensure that you get adequate rest','if you are sleep deprieved make sure you exhaust all your energy during the day and make sure you get a pleasant and peaceful sleep', - 'tests prove that sleep deprivation is a result for low academic performance make sure you dont fall under that','Please ensure that the extra miles which you are putting doesnt affect your sleep'] - - res5="Feedback on your sleep time --> "+random.choice(j) - - if z['motivation value'].eq(1).all(): - k=['you are fairly 
motivated ,Motivation drives everyone to work better to achive something,it lits a light inside you ','you should be really proud that you have good motivation at a really young age,use it in areas where you feel a bit off', - 'None of the greatest achievers couldnt have done it without motivation and self motivation is really powerfull tool to success ,you are one among them Keep going!', - 'a good level of motivation gives you high spirits and a good attitude,your attitude builds YOU'] - - res6="motivation factor --> "+random.choice(k) - if z['motivation value'].eq(0).all(): - - l=['Nobody in the world is born with motivation,in this modern era you cant expect external motivation,you better be your own motivation','messi took eighteen years to be the G.O.A.T ignoring all demotivation and insults its finally your time', - 'change your scenery sitting in a desk all-day makes you dull ,to renew interest,a new setting can be just what some students need to stay motivated to learn', - 'lay-out clear objectives before you start learning so that there is no confussion','Make your goals high but attainable dont be afraid to push yourself to get more out of them ', - 'Spend some quality time with your family listen to their experiences and try to dollow their footsteps'] - - - res6="motivation factor --> "+random.choice(l) - - if z['performance_value'].eq(1).all(): - m=['Good job you!! Your hardwork and efforts paid off, you have nothing to worry about ,you are academically strong','To be honest that grades made me a little jealous. I can see the work you are putting towards academics', - 'Give a big hit on boards make your parents and teachers proud, trust me that is super satisfying','academic performance gives you a lot of boost to you take that put in all other aspects which will give you overall developement', - 'the most satisfying thing is scoring high its great that you are easily doing it','you are almost sorted out you now just have to take care of the bits and pieces'] - - res7="Feedback on your performance --> "+random.choice(m) - - if z['performance_value'].eq(0).all(): - n=['Its never late to begin. Divide your work, note important things mentioned in class spend more time in studies','Dont be ashamed to ask doubts we dont mind others judging. So we start from physics today? jk', - 'Start studying with your friends, seek help from teachers,Remember the hardwork you put never fails you','analyse where you are making errors if you find that you are making mistakes while writing try practicing the sample papers it will help you to an extent' - ,'you are almost there!!take short notes of the theoritical concepts so that it will be easy for reference','dont worry about where you are standing at the moment ,back yourself ,start it from scratch'] - - res7="Feedback on your performance --> "+random.choice(n) - - if z['media_value'].eq(1).all(): - o=[' In the world of people being addicted to social media today, its happy to see someone like you','Its good that you are not scrolling too much','Having a good social profile is important and you having a limit is really impressive' - ,'Having the self control on yourself is really great but ensure that dont overdo on anything else','you are self-conscious which is really a great character to acquire'] - - res8="Feedback on your social media time --> "+random.choice(o) - - if z['media_value'].eq(0).all(): - p=['Its really common for this generation people to get addicted to social media. 
All you have to do is keep track of the time, dont over do stuffs and you dont have to post a story everyday.', - 'Nothing wrong becoming a social idle, but right now concentrate in your studies','socially active is essential but over - scrolling will trap you in the matrix which you are unaware of', - 'stay in your limits socially active for more than a hour during high school is ill advised','knowing that its impacting you and using social media again !! what is that??'] - - res8="Feedback on your social media time --> "+random.choice(p) - - - if z['overall'].eq(1).all(): - q=['OMG!! Im thinking of getting a piece of advise from you you are almost there good that you equally participate in everything','You are an explorer and can learn new things easily,you are about to win the race', - 'Your works are impressing everyone right from your teacher,friends and your parents, You are active,brisk and have good potential to improve your performance', - 'You are doing great ,you are ready for new challenges and failures doesnt bother you ','You are multi tasker and ensure that you dont sink with over-confidence','Dont put yourself in any kind of pressure, eventhough you feel stressed time will answer to it and you will pass with flying colours' - 'You are growing with confidence, take it to learn new things,choose your core and find your destiny'] - - res9=random.choice(q) - - if z['overall'].eq(0).all(): - - r=['Its all good everyone goes out of form,the comeback is always on start putting consistent efforts','Put in the time, hardwork and you can already see it coming,you are just a few steps dowm','When we hit out lowest point we are open to the greatest change you are going to bring the best out of it. And yes that was said by Avatar Roku' - ,'Choose the right person whom you feel will take you through all the obstracles you need make things more clear','The best view comes after the hardest climb you can climb the moutain ahead of you','You just need to reboot and have a good set-up ,stay optimistic and everything will take care of itself if you take one step at a time', - 'You are nearing the pinacle of your true potential,just few changes hear and there you will be on your prime'] - - res9=random.choice(r) - - - - - - - - - return "hi " + str (Name) + " this is a predictive model there may be some incorrect guesses so just take the points which you feel may work in your case \nalso if u feel the feeadbacks are harsh please flag your opinion \ntake your time to read this and hope u like it 😊\n\n\n"+ res1+" ,\n " + res2 +" ,\n " + res3 +" ,\n " + res4 +" ,\n " + res5 +" ,\n " + res6 +" ,\n " + res7 +" ,\n " + res8 +" ,\n\n\n " + res9 - - - - demo = gr.Interface( - assign_weights, - [ - "text", - gr.Dropdown(['Science','Commerce'], label="Choose your stream"), - gr.Radio(["<5", "5 - 12", "13 - 20", "20 - 30",">30"],label='On an average, how many hours a week do you spend on academics?'), - gr.Radio(["0 - 20%", "20 - 40%", "40 - 60%", "60 - 80%","80 -100%"],label='How willing are you to work on a particular task ?'), - gr.Radio(["Yes", "No", ],label='Do you take up any physical activity at regular intervals(at least 3 hours a week) ?'), - gr.Radio(["Football", "Cricket", "Basketball", "Tennis" , "Chess" ,"Other","Not interested in sports"],label='Choose your favourite sport you follow or play'), - gr.Radio(["Never", "Occasionally", "Sometimes", "Often" , "Always"],label='How often do you spend time with your friends and family?'), - gr.Radio(["Always", "Very often", "Sometimes", "Rarely" 
,"Never"],label='Has poor sleep troubled you in the last month?'), - gr.Radio(["Perfect", "Good", "Average", "Poor"],label='What is your current level of fitness?'), - gr.Radio(["Never", "Once in a while", "About half the time", "Most of the time","Always"],label='Do you feel kinda losing concentration during classes and other activities'), - gr.Radio(["Never", "Once in a while", "About half the time", "Most of the time","Always"],label='is there a change in your eating habits(either under eating or overeating'), - gr.Radio(["< 2", "2 - 5", "5 - 8", "> 8"],label='How many hours of free time do you have after school?'), - gr.Radio(["Asking a lot of questions to the teacher", "Completing various assignments", "Sports and other extracurricular activities", "Other"],label='What motivates you to learn more?'), - gr.Radio(["<30 mins", "30 - 60", "60 - 120", ">120 mins"],label='How long you spend your time on social media on a daily basis? '), - gr.Radio(["Yes", "No"],label='Do you feel that spending time on social media has been a reason for the deterioration in your academic performance?'), - gr.Radio(["<30%", "30% - 50%", "50% - 70%", "70% - 90%",">90%"],label='How much you score in your academics'), - ], - "text", - - title="Performance predictor and feedback generator", - description="Here's a sample performance calculator. Enjoy!", - - ) -demo.launch(share=True, debug=True) - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_install.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_install.py deleted file mode 100644 index d01b24a918954bd5440c94463369ee7a666aad29..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_install.py +++ /dev/null @@ -1,867 +0,0 @@ -# The following comment should be removed at some point in the future. 
-# mypy: strict-optional=False - -import functools -import logging -import os -import shutil -import sys -import uuid -import zipfile -from optparse import Values -from typing import Any, Collection, Dict, Iterable, List, Optional, Sequence, Union - -from pip._vendor.packaging.markers import Marker -from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.specifiers import SpecifierSet -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.packaging.version import Version -from pip._vendor.packaging.version import parse as parse_version -from pip._vendor.pyproject_hooks import BuildBackendHookCaller - -from pip._internal.build_env import BuildEnvironment, NoOpBuildEnvironment -from pip._internal.exceptions import InstallationError -from pip._internal.locations import get_scheme -from pip._internal.metadata import ( - BaseDistribution, - get_default_environment, - get_directory_distribution, - get_wheel_distribution, -) -from pip._internal.metadata.base import FilesystemWheel -from pip._internal.models.direct_url import DirectUrl -from pip._internal.models.link import Link -from pip._internal.operations.build.metadata import generate_metadata -from pip._internal.operations.build.metadata_editable import generate_editable_metadata -from pip._internal.operations.build.metadata_legacy import ( - generate_metadata as generate_metadata_legacy, -) -from pip._internal.operations.install.editable_legacy import ( - install_editable as install_editable_legacy, -) -from pip._internal.operations.install.wheel import install_wheel -from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path -from pip._internal.req.req_uninstall import UninstallPathSet -from pip._internal.utils.deprecation import deprecated -from pip._internal.utils.hashes import Hashes -from pip._internal.utils.misc import ( - ConfiguredBuildBackendHookCaller, - ask_path_exists, - backup_dir, - display_path, - hide_url, - redact_auth_from_url, -) -from pip._internal.utils.packaging import safe_extra -from pip._internal.utils.subprocess import runner_with_spinner_message -from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds -from pip._internal.utils.virtualenv import running_under_virtualenv -from pip._internal.vcs import vcs - -logger = logging.getLogger(__name__) - - -class InstallRequirement: - """ - Represents something that may be installed later on, may have information - about where to fetch the relevant requirement and also contains logic for - installing the said requirement. - """ - - def __init__( - self, - req: Optional[Requirement], - comes_from: Optional[Union[str, "InstallRequirement"]], - editable: bool = False, - link: Optional[Link] = None, - markers: Optional[Marker] = None, - use_pep517: Optional[bool] = None, - isolated: bool = False, - *, - global_options: Optional[List[str]] = None, - hash_options: Optional[Dict[str, List[str]]] = None, - config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, - constraint: bool = False, - extras: Collection[str] = (), - user_supplied: bool = False, - permit_editable_wheels: bool = False, - ) -> None: - assert req is None or isinstance(req, Requirement), req - self.req = req - self.comes_from = comes_from - self.constraint = constraint - self.editable = editable - self.permit_editable_wheels = permit_editable_wheels - - # source_dir is the local directory where the linked requirement is - # located, or unpacked. 
In case unpacking is needed, creating and - # populating source_dir is done by the RequirementPreparer. Note this - # is not necessarily the directory where pyproject.toml or setup.py is - # located - that one is obtained via unpacked_source_directory. - self.source_dir: Optional[str] = None - if self.editable: - assert link - if link.is_file: - self.source_dir = os.path.normpath(os.path.abspath(link.file_path)) - - if link is None and req and req.url: - # PEP 508 URL requirement - link = Link(req.url) - self.link = self.original_link = link - - # When this InstallRequirement is a wheel obtained from the cache of locally - # built wheels, this is the source link corresponding to the cache entry, which - # was used to download and build the cached wheel. - self.cached_wheel_source_link: Optional[Link] = None - - # Information about the location of the artifact that was downloaded . This - # property is guaranteed to be set in resolver results. - self.download_info: Optional[DirectUrl] = None - - # Path to any downloaded or already-existing package. - self.local_file_path: Optional[str] = None - if self.link and self.link.is_file: - self.local_file_path = self.link.file_path - - if extras: - self.extras = extras - elif req: - self.extras = {safe_extra(extra) for extra in req.extras} - else: - self.extras = set() - if markers is None and req: - markers = req.marker - self.markers = markers - - # This holds the Distribution object if this requirement is already installed. - self.satisfied_by: Optional[BaseDistribution] = None - # Whether the installation process should try to uninstall an existing - # distribution before installing this requirement. - self.should_reinstall = False - # Temporary build location - self._temp_build_dir: Optional[TempDirectory] = None - # Set to True after successful installation - self.install_succeeded: Optional[bool] = None - # Supplied options - self.global_options = global_options if global_options else [] - self.hash_options = hash_options if hash_options else {} - self.config_settings = config_settings - # Set to True after successful preparation of this requirement - self.prepared = False - # User supplied requirement are explicitly requested for installation - # by the user via CLI arguments or requirements files, as opposed to, - # e.g. dependencies, extras or constraints. - self.user_supplied = user_supplied - - self.isolated = isolated - self.build_env: BuildEnvironment = NoOpBuildEnvironment() - - # For PEP 517, the directory where we request the project metadata - # gets stored. We need this to pass to build_wheel, so the backend - # can ensure that the wheel matches the metadata (see the PEP for - # details). - self.metadata_directory: Optional[str] = None - - # The static build requirements (from pyproject.toml) - self.pyproject_requires: Optional[List[str]] = None - - # Build requirements that we will check are available - self.requirements_to_check: List[str] = [] - - # The PEP 517 backend we should use to build the project - self.pep517_backend: Optional[BuildBackendHookCaller] = None - - # Are we using PEP 517 for this requirement? - # After pyproject.toml has been loaded, the only valid values are True - # and False. Before loading, None is valid (meaning "use the default"). - # Setting an explicit value before loading pyproject.toml is supported, - # but after loading this flag should be treated as read only. 
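- # For example, a caller may pass use_pep517=True to force the PEP 517 - # path; load_pyproject_toml() below resolves None to True or False once - # pyproject.toml has been inspected.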
- self.use_pep517 = use_pep517 - - # This requirement needs more preparation before it can be built - self.needs_more_preparation = False - - def __str__(self) -> str: - if self.req: - s = str(self.req) - if self.link: - s += " from {}".format(redact_auth_from_url(self.link.url)) - elif self.link: - s = redact_auth_from_url(self.link.url) - else: - s = "" - if self.satisfied_by is not None: - if self.satisfied_by.location is not None: - location = display_path(self.satisfied_by.location) - else: - location = "" - s += f" in {location}" - if self.comes_from: - if isinstance(self.comes_from, str): - comes_from: Optional[str] = self.comes_from - else: - comes_from = self.comes_from.from_path() - if comes_from: - s += f" (from {comes_from})" - return s - - def __repr__(self) -> str: - return "<{} object: {} editable={!r}>".format( - self.__class__.__name__, str(self), self.editable - ) - - def format_debug(self) -> str: - """An un-tested helper for getting state, for debugging.""" - attributes = vars(self) - names = sorted(attributes) - - state = ("{}={!r}".format(attr, attributes[attr]) for attr in sorted(names)) - return "<{name} object: {{{state}}}>".format( - name=self.__class__.__name__, - state=", ".join(state), - ) - - # Things that are valid for all kinds of requirements? - @property - def name(self) -> Optional[str]: - if self.req is None: - return None - return self.req.name - - @functools.lru_cache() # use cached_property in python 3.8+ - def supports_pyproject_editable(self) -> bool: - if not self.use_pep517: - return False - assert self.pep517_backend - with self.build_env: - runner = runner_with_spinner_message( - "Checking if build backend supports build_editable" - ) - with self.pep517_backend.subprocess_runner(runner): - return "build_editable" in self.pep517_backend._supported_features() - - @property - def specifier(self) -> SpecifierSet: - return self.req.specifier - - @property - def is_pinned(self) -> bool: - """Return whether I am pinned to an exact version. - - For example, some-package==1.2 is pinned; some-package>1.2 is not. - """ - specifiers = self.specifier - return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="} - - def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> bool: - if not extras_requested: - # Provide an extra to safely evaluate the markers - # without matching any extra - extras_requested = ("",) - if self.markers is not None: - return any( - self.markers.evaluate({"extra": extra}) for extra in extras_requested - ) - else: - return True - - @property - def has_hash_options(self) -> bool: - """Return whether any known-good hashes are specified as options. - - These activate --require-hashes mode; hashes specified as part of a - URL do not. - - """ - return bool(self.hash_options) - - def hashes(self, trust_internet: bool = True) -> Hashes: - """Return a hash-comparer that considers my option- and URL-based - hashes to be known-good. - - Hashes in URLs--ones embedded in the requirements file, not ones - downloaded from an index server--are almost peers with ones from - flags. They satisfy --require-hashes (whether it was implicitly or - explicitly activated) but do not activate it. md5 and sha224 are not - allowed in flags, which should nudge people toward good algos. We - always OR all hashes together, even ones from URLs. - - :param trust_internet: Whether to trust URL-based (#md5=...) 
hashes - downloaded from the internet, as by populate_link() - - """ - good_hashes = self.hash_options.copy() - if trust_internet: - link = self.link - elif self.original_link and self.user_supplied: - link = self.original_link - else: - link = None - if link and link.hash: - good_hashes.setdefault(link.hash_name, []).append(link.hash) - return Hashes(good_hashes) - - def from_path(self) -> Optional[str]: - """Format a nice indicator to show where this "comes from" """ - if self.req is None: - return None - s = str(self.req) - if self.comes_from: - if isinstance(self.comes_from, str): - comes_from = self.comes_from - else: - comes_from = self.comes_from.from_path() - if comes_from: - s += "->" + comes_from - return s - - def ensure_build_location( - self, build_dir: str, autodelete: bool, parallel_builds: bool - ) -> str: - assert build_dir is not None - if self._temp_build_dir is not None: - assert self._temp_build_dir.path - return self._temp_build_dir.path - if self.req is None: - # Some systems have /tmp as a symlink which confuses custom - # builds (such as numpy). Thus, we ensure that the real path - # is returned. - self._temp_build_dir = TempDirectory( - kind=tempdir_kinds.REQ_BUILD, globally_managed=True - ) - - return self._temp_build_dir.path - - # This is the only remaining place where we manually determine the path - # for the temporary directory. It is only needed for editables where - # it is the value of the --src option. - - # When parallel builds are enabled, add a UUID to the build directory - # name so multiple builds do not interfere with each other. - dir_name: str = canonicalize_name(self.name) - if parallel_builds: - dir_name = f"{dir_name}_{uuid.uuid4().hex}" - - # FIXME: Is there a better place to create the build_dir? (hg and bzr - # need this) - if not os.path.exists(build_dir): - logger.debug("Creating directory %s", build_dir) - os.makedirs(build_dir) - actual_build_dir = os.path.join(build_dir, dir_name) - # `None` indicates that we respect the globally-configured deletion - # settings, which is what we actually want when auto-deleting. - delete_arg = None if autodelete else False - return TempDirectory( - path=actual_build_dir, - delete=delete_arg, - kind=tempdir_kinds.REQ_BUILD, - globally_managed=True, - ).path - - def _set_requirement(self) -> None: - """Set requirement after generating metadata.""" - assert self.req is None - assert self.metadata is not None - assert self.source_dir is not None - - # Construct a Requirement object from the generated metadata - if isinstance(parse_version(self.metadata["Version"]), Version): - op = "==" - else: - op = "===" - - self.req = Requirement( - "".join( - [ - self.metadata["Name"], - op, - self.metadata["Version"], - ] - ) - ) - - def warn_on_mismatching_name(self) -> None: - metadata_name = canonicalize_name(self.metadata["Name"]) - if canonicalize_name(self.req.name) == metadata_name: - # Everything is fine. - return - - # If we're here, there's a mismatch. Log a warning about it. - logger.warning( - "Generating metadata for package %s " - "produced metadata for project name %s. Fix your " - "#egg=%s fragments.", - self.name, - metadata_name, - self.name, - ) - self.req = Requirement(metadata_name) - - def check_if_exists(self, use_user_site: bool) -> None: - """Find an installed distribution that satisfies or conflicts - with this requirement, and set self.satisfied_by or - self.should_reinstall appropriately. 
- """ - if self.req is None: - return - existing_dist = get_default_environment().get_distribution(self.req.name) - if not existing_dist: - return - - version_compatible = self.req.specifier.contains( - existing_dist.version, - prereleases=True, - ) - if not version_compatible: - self.satisfied_by = None - if use_user_site: - if existing_dist.in_usersite: - self.should_reinstall = True - elif running_under_virtualenv() and existing_dist.in_site_packages: - raise InstallationError( - f"Will not install to the user site because it will " - f"lack sys.path precedence to {existing_dist.raw_name} " - f"in {existing_dist.location}" - ) - else: - self.should_reinstall = True - else: - if self.editable: - self.should_reinstall = True - # when installing editables, nothing pre-existing should ever - # satisfy - self.satisfied_by = None - else: - self.satisfied_by = existing_dist - - # Things valid for wheels - @property - def is_wheel(self) -> bool: - if not self.link: - return False - return self.link.is_wheel - - @property - def is_wheel_from_cache(self) -> bool: - # When True, it means that this InstallRequirement is a local wheel file in the - # cache of locally built wheels. - return self.cached_wheel_source_link is not None - - # Things valid for sdists - @property - def unpacked_source_directory(self) -> str: - return os.path.join( - self.source_dir, self.link and self.link.subdirectory_fragment or "" - ) - - @property - def setup_py_path(self) -> str: - assert self.source_dir, f"No source dir for {self}" - setup_py = os.path.join(self.unpacked_source_directory, "setup.py") - - return setup_py - - @property - def setup_cfg_path(self) -> str: - assert self.source_dir, f"No source dir for {self}" - setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg") - - return setup_cfg - - @property - def pyproject_toml_path(self) -> str: - assert self.source_dir, f"No source dir for {self}" - return make_pyproject_path(self.unpacked_source_directory) - - def load_pyproject_toml(self) -> None: - """Load the pyproject.toml file. - - After calling this routine, all of the attributes related to PEP 517 - processing for this requirement have been set. In particular, the - use_pep517 attribute can be used to determine whether we should - follow the PEP 517 or legacy (setup.py) code path. - """ - pyproject_toml_data = load_pyproject_toml( - self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self) - ) - - if pyproject_toml_data is None: - if self.config_settings: - deprecated( - reason=f"Config settings are ignored for project {self}.", - replacement=( - "to use --use-pep517 or add a " - "pyproject.toml file to the project" - ), - gone_in="23.3", - ) - self.use_pep517 = False - return - - self.use_pep517 = True - requires, backend, check, backend_path = pyproject_toml_data - self.requirements_to_check = check - self.pyproject_requires = requires - self.pep517_backend = ConfiguredBuildBackendHookCaller( - self, - self.unpacked_source_directory, - backend, - backend_path=backend_path, - ) - - def isolated_editable_sanity_check(self) -> None: - """Check that an editable requirement if valid for use with PEP 517/518. 
- - This verifies that an editable that has a pyproject.toml either supports PEP 660 - or as a setup.py or a setup.cfg - """ - if ( - self.editable - and self.use_pep517 - and not self.supports_pyproject_editable() - and not os.path.isfile(self.setup_py_path) - and not os.path.isfile(self.setup_cfg_path) - ): - raise InstallationError( - f"Project {self} has a 'pyproject.toml' and its build " - f"backend is missing the 'build_editable' hook. Since it does not " - f"have a 'setup.py' nor a 'setup.cfg', " - f"it cannot be installed in editable mode. " - f"Consider using a build backend that supports PEP 660." - ) - - def prepare_metadata(self) -> None: - """Ensure that project metadata is available. - - Under PEP 517 and PEP 660, call the backend hook to prepare the metadata. - Under legacy processing, call setup.py egg-info. - """ - assert self.source_dir - details = self.name or f"from {self.link}" - - if self.use_pep517: - assert self.pep517_backend is not None - if ( - self.editable - and self.permit_editable_wheels - and self.supports_pyproject_editable() - ): - self.metadata_directory = generate_editable_metadata( - build_env=self.build_env, - backend=self.pep517_backend, - details=details, - ) - else: - self.metadata_directory = generate_metadata( - build_env=self.build_env, - backend=self.pep517_backend, - details=details, - ) - else: - self.metadata_directory = generate_metadata_legacy( - build_env=self.build_env, - setup_py_path=self.setup_py_path, - source_dir=self.unpacked_source_directory, - isolated=self.isolated, - details=details, - ) - - # Act on the newly generated metadata, based on the name and version. - if not self.name: - self._set_requirement() - else: - self.warn_on_mismatching_name() - - self.assert_source_matches_version() - - @property - def metadata(self) -> Any: - if not hasattr(self, "_metadata"): - self._metadata = self.get_dist().metadata - - return self._metadata - - def get_dist(self) -> BaseDistribution: - if self.metadata_directory: - return get_directory_distribution(self.metadata_directory) - elif self.local_file_path and self.is_wheel: - return get_wheel_distribution( - FilesystemWheel(self.local_file_path), canonicalize_name(self.name) - ) - raise AssertionError( - f"InstallRequirement {self} has no metadata directory and no wheel: " - f"can't make a distribution." - ) - - def assert_source_matches_version(self) -> None: - assert self.source_dir - version = self.metadata["version"] - if self.req.specifier and version not in self.req.specifier: - logger.warning( - "Requested %s, but installing version %s", - self, - version, - ) - else: - logger.debug( - "Source in %s has version %s, which satisfies requirement %s", - display_path(self.source_dir), - version, - self, - ) - - # For both source distributions and editables - def ensure_has_source_dir( - self, - parent_dir: str, - autodelete: bool = False, - parallel_builds: bool = False, - ) -> None: - """Ensure that a source_dir is set. - - This will create a temporary build dir if the name of the requirement - isn't known yet. - - :param parent_dir: The ideal pip parent_dir for the source_dir. - Generally src_dir for editables and build_dir for sdists. 
- :return: self.source_dir - """ - if self.source_dir is None: - self.source_dir = self.ensure_build_location( - parent_dir, - autodelete=autodelete, - parallel_builds=parallel_builds, - ) - - # For editable installations - def update_editable(self) -> None: - if not self.link: - logger.debug( - "Cannot update repository at %s; repository location is unknown", - self.source_dir, - ) - return - assert self.editable - assert self.source_dir - if self.link.scheme == "file": - # Static paths don't get updated - return - vcs_backend = vcs.get_backend_for_scheme(self.link.scheme) - # Editable requirements are validated in Requirement constructors. - # So here, if it's neither a path nor a valid VCS URL, it's a bug. - assert vcs_backend, f"Unsupported VCS URL {self.link.url}" - hidden_url = hide_url(self.link.url) - vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0) - - # Top-level Actions - def uninstall( - self, auto_confirm: bool = False, verbose: bool = False - ) -> Optional[UninstallPathSet]: - """ - Uninstall the distribution currently satisfying this requirement. - - Prompts before removing or modifying files unless - ``auto_confirm`` is True. - - Refuses to delete or modify files outside of ``sys.prefix`` - - thus uninstallation within a virtual environment can only - modify that virtual environment, even if the virtualenv is - linked to global site-packages. - - """ - assert self.req - dist = get_default_environment().get_distribution(self.req.name) - if not dist: - logger.warning("Skipping %s as it is not installed.", self.name) - return None - logger.info("Found existing installation: %s", dist) - - uninstalled_pathset = UninstallPathSet.from_dist(dist) - uninstalled_pathset.remove(auto_confirm, verbose) - return uninstalled_pathset - - def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str: - def _clean_zip_name(name: str, prefix: str) -> str: - assert name.startswith( - prefix + os.path.sep - ), f"name {name!r} doesn't start with prefix {prefix!r}" - name = name[len(prefix) + 1 :] - name = name.replace(os.path.sep, "/") - return name - - path = os.path.join(parentdir, path) - name = _clean_zip_name(path, rootdir) - return self.name + "/" + name - - def archive(self, build_dir: Optional[str]) -> None: - """Saves archive to provided build_dir. - - Used for saving downloaded VCS requirements as part of `pip download`. - """ - assert self.source_dir - if build_dir is None: - return - - create_archive = True - archive_name = "{}-{}.zip".format(self.name, self.metadata["version"]) - archive_path = os.path.join(build_dir, archive_name) - - if os.path.exists(archive_path): - response = ask_path_exists( - "The file {} exists. 
(i)gnore, (w)ipe, " - "(b)ackup, (a)bort ".format(display_path(archive_path)), - ("i", "w", "b", "a"), - ) - if response == "i": - create_archive = False - elif response == "w": - logger.warning("Deleting %s", display_path(archive_path)) - os.remove(archive_path) - elif response == "b": - dest_file = backup_dir(archive_path) - logger.warning( - "Backing up %s to %s", - display_path(archive_path), - display_path(dest_file), - ) - shutil.move(archive_path, dest_file) - elif response == "a": - sys.exit(-1) - - if not create_archive: - return - - zip_output = zipfile.ZipFile( - archive_path, - "w", - zipfile.ZIP_DEFLATED, - allowZip64=True, - ) - with zip_output: - dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory)) - for dirpath, dirnames, filenames in os.walk(dir): - for dirname in dirnames: - dir_arcname = self._get_archive_name( - dirname, - parentdir=dirpath, - rootdir=dir, - ) - zipdir = zipfile.ZipInfo(dir_arcname + "/") - zipdir.external_attr = 0x1ED << 16 # 0o755 - zip_output.writestr(zipdir, "") - for filename in filenames: - file_arcname = self._get_archive_name( - filename, - parentdir=dirpath, - rootdir=dir, - ) - filename = os.path.join(dirpath, filename) - zip_output.write(filename, file_arcname) - - logger.info("Saved %s", display_path(archive_path)) - - def install( - self, - global_options: Optional[Sequence[str]] = None, - root: Optional[str] = None, - home: Optional[str] = None, - prefix: Optional[str] = None, - warn_script_location: bool = True, - use_user_site: bool = False, - pycompile: bool = True, - ) -> None: - scheme = get_scheme( - self.name, - user=use_user_site, - home=home, - root=root, - isolated=self.isolated, - prefix=prefix, - ) - - if self.editable and not self.is_wheel: - install_editable_legacy( - global_options=global_options if global_options is not None else [], - prefix=prefix, - home=home, - use_user_site=use_user_site, - name=self.name, - setup_py_path=self.setup_py_path, - isolated=self.isolated, - build_env=self.build_env, - unpacked_source_directory=self.unpacked_source_directory, - ) - self.install_succeeded = True - return - - assert self.is_wheel - assert self.local_file_path - - install_wheel( - self.name, - self.local_file_path, - scheme=scheme, - req_description=str(self.req), - pycompile=pycompile, - warn_script_location=warn_script_location, - direct_url=self.download_info if self.original_link else None, - requested=self.user_supplied, - ) - self.install_succeeded = True - - -def check_invalid_constraint_type(req: InstallRequirement) -> str: - # Check for unsupported forms - problem = "" - if not req.name: - problem = "Unnamed requirements are not allowed as constraints" - elif req.editable: - problem = "Editable requirements are not allowed as constraints" - elif req.extras: - problem = "Constraints cannot have extras" - - if problem: - deprecated( - reason=( - "Constraints are only allowed to take the form of a package " - "name and a version specifier. Other forms were originally " - "permitted as an accident of the implementation, but were " - "undocumented. The new implementation of the resolver no " - "longer supports these forms." 
- ), - replacement="replacing the constraint with a requirement", - # No plan yet for when the new resolver becomes default - gone_in=None, - issue=8210, - ) - - return problem - - -def _has_option(options: Values, reqs: List[InstallRequirement], option: str) -> bool: - if getattr(options, option, None): - return True - for req in reqs: - if getattr(req, option, None): - return True - return False - - -def check_legacy_setup_py_options( - options: Values, - reqs: List[InstallRequirement], -) -> None: - has_build_options = _has_option(options, reqs, "build_options") - has_global_options = _has_option(options, reqs, "global_options") - if has_build_options or has_global_options: - deprecated( - reason="--build-option and --global-option are deprecated.", - issue=11859, - replacement="to use --config-settings", - gone_in="23.3", - ) - logger.warning( - "Implying --no-binary=:all: due to the presence of " - "--build-option / --global-option. " - ) - options.format_control.disallow_binaries() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/poolmanager.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/poolmanager.py deleted file mode 100644 index ca4ec341184adb3d30f3cd825b49a81b87d29b08..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/poolmanager.py +++ /dev/null @@ -1,537 +0,0 @@ -from __future__ import absolute_import - -import collections -import functools -import logging - -from ._collections import RecentlyUsedContainer -from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme -from .exceptions import ( - LocationValueError, - MaxRetryError, - ProxySchemeUnknown, - ProxySchemeUnsupported, - URLSchemeUnknown, -) -from .packages import six -from .packages.six.moves.urllib.parse import urljoin -from .request import RequestMethods -from .util.proxy import connection_requires_http_tunnel -from .util.retry import Retry -from .util.url import parse_url - -__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] - - -log = logging.getLogger(__name__) - -SSL_KEYWORDS = ( - "key_file", - "cert_file", - "cert_reqs", - "ca_certs", - "ssl_version", - "ca_cert_dir", - "ssl_context", - "key_password", - "server_hostname", -) - -# All known keyword arguments that could be provided to the pool manager, its -# pools, or the underlying connections. This is used to construct a pool key. -_key_fields = ( - "key_scheme", # str - "key_host", # str - "key_port", # int - "key_timeout", # int or float or Timeout - "key_retries", # int or Retry - "key_strict", # bool - "key_block", # bool - "key_source_address", # str - "key_key_file", # str - "key_key_password", # str - "key_cert_file", # str - "key_cert_reqs", # str - "key_ca_certs", # str - "key_ssl_version", # str - "key_ca_cert_dir", # str - "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext - "key_maxsize", # int - "key_headers", # dict - "key__proxy", # parsed proxy url - "key__proxy_headers", # dict - "key__proxy_config", # class - "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples - "key__socks_options", # dict - "key_assert_hostname", # bool or string - "key_assert_fingerprint", # str - "key_server_hostname", # str -) - -#: The namedtuple class used to construct keys for the connection pool. -#: All custom key schemes should include the fields in this key at a minimum. 
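-#: For illustration (hypothetical values), the default normalizer defined -#: below produces keys such as -#: ``PoolKey(key_scheme='https', key_host='example.com', key_port=443, ...)`` -#: with every field missing from the request context defaulted to ``None``.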
-PoolKey = collections.namedtuple("PoolKey", _key_fields) - -_proxy_config_fields = ("ssl_context", "use_forwarding_for_https") -ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) - - -def _default_key_normalizer(key_class, request_context): - """ - Create a pool key out of a request context dictionary. - - According to RFC 3986, both the scheme and host are case-insensitive. - Therefore, this function normalizes both before constructing the pool - key for an HTTPS request. If you wish to change this behaviour, provide - alternate callables to ``key_fn_by_scheme``. - - :param key_class: - The class to use when constructing the key. This should be a namedtuple - with the ``scheme`` and ``host`` keys at a minimum. - :type key_class: namedtuple - :param request_context: - A dictionary-like object that contain the context for a request. - :type request_context: dict - - :return: A namedtuple that can be used as a connection pool key. - :rtype: PoolKey - """ - # Since we mutate the dictionary, make a copy first - context = request_context.copy() - context["scheme"] = context["scheme"].lower() - context["host"] = context["host"].lower() - - # These are both dictionaries and need to be transformed into frozensets - for key in ("headers", "_proxy_headers", "_socks_options"): - if key in context and context[key] is not None: - context[key] = frozenset(context[key].items()) - - # The socket_options key may be a list and needs to be transformed into a - # tuple. - socket_opts = context.get("socket_options") - if socket_opts is not None: - context["socket_options"] = tuple(socket_opts) - - # Map the kwargs to the names in the namedtuple - this is necessary since - # namedtuples can't have fields starting with '_'. - for key in list(context.keys()): - context["key_" + key] = context.pop(key) - - # Default to ``None`` for keys missing from the context - for field in key_class._fields: - if field not in context: - context[field] = None - - return key_class(**context) - - -#: A dictionary that maps a scheme to a callable that creates a pool key. -#: This can be used to alter the way pool keys are constructed, if desired. -#: Each PoolManager makes a copy of this dictionary so they can be configured -#: globally here, or individually on the instance. -key_fn_by_scheme = { - "http": functools.partial(_default_key_normalizer, PoolKey), - "https": functools.partial(_default_key_normalizer, PoolKey), -} - -pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} - - -class PoolManager(RequestMethods): - """ - Allows for arbitrary requests while transparently keeping track of - necessary connection pools for you. - - :param num_pools: - Number of connection pools to cache before discarding the least - recently used pool. - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - - :param \\**connection_pool_kw: - Additional parameters are used to create fresh - :class:`urllib3.connectionpool.ConnectionPool` instances. 
- - Example:: - - >>> manager = PoolManager(num_pools=2) - >>> r = manager.request('GET', 'http://google.com/') - >>> r = manager.request('GET', 'http://google.com/mail') - >>> r = manager.request('GET', 'http://yahoo.com/') - >>> len(manager.pools) - 2 - - """ - - proxy = None - proxy_config = None - - def __init__(self, num_pools=10, headers=None, **connection_pool_kw): - RequestMethods.__init__(self, headers) - self.connection_pool_kw = connection_pool_kw - self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) - - # Locally set the pool classes and keys so other PoolManagers can - # override them. - self.pool_classes_by_scheme = pool_classes_by_scheme - self.key_fn_by_scheme = key_fn_by_scheme.copy() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.clear() - # Return False to re-raise any potential exceptions - return False - - def _new_pool(self, scheme, host, port, request_context=None): - """ - Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and - any additional pool keyword arguments. - - If ``request_context`` is provided, it is provided as keyword arguments - to the pool class used. This method is used to actually create the - connection pools handed out by :meth:`connection_from_url` and - companion methods. It is intended to be overridden for customization. - """ - pool_cls = self.pool_classes_by_scheme[scheme] - if request_context is None: - request_context = self.connection_pool_kw.copy() - - # Although the context has everything necessary to create the pool, - # this function has historically only used the scheme, host, and port - # in the positional args. When an API change is acceptable these can - # be removed. - for key in ("scheme", "host", "port"): - request_context.pop(key, None) - - if scheme == "http": - for kw in SSL_KEYWORDS: - request_context.pop(kw, None) - - return pool_cls(host, port, **request_context) - - def clear(self): - """ - Empty our store of pools and direct them all to close. - - This will not affect in-flight connections, but they will not be - re-used after completion. - """ - self.pools.clear() - - def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): - """ - Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. - - If ``port`` isn't given, it will be derived from the ``scheme`` using - ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is - provided, it is merged with the instance's ``connection_pool_kw`` - variable and used to create the new connection pool, if one is - needed. - """ - - if not host: - raise LocationValueError("No host specified.") - - request_context = self._merge_pool_kwargs(pool_kwargs) - request_context["scheme"] = scheme or "http" - if not port: - port = port_by_scheme.get(request_context["scheme"].lower(), 80) - request_context["port"] = port - request_context["host"] = host - - return self.connection_from_context(request_context) - - def connection_from_context(self, request_context): - """ - Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. - - ``request_context`` must at least contain the ``scheme`` key and its - value must be a key in ``key_fn_by_scheme`` instance variable. 
- """ - scheme = request_context["scheme"].lower() - pool_key_constructor = self.key_fn_by_scheme.get(scheme) - if not pool_key_constructor: - raise URLSchemeUnknown(scheme) - pool_key = pool_key_constructor(request_context) - - return self.connection_from_pool_key(pool_key, request_context=request_context) - - def connection_from_pool_key(self, pool_key, request_context=None): - """ - Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. - - ``pool_key`` should be a namedtuple that only contains immutable - objects. At a minimum it must have the ``scheme``, ``host``, and - ``port`` fields. - """ - with self.pools.lock: - # If the scheme, host, or port doesn't match existing open - # connections, open a new ConnectionPool. - pool = self.pools.get(pool_key) - if pool: - return pool - - # Make a fresh ConnectionPool of the desired type - scheme = request_context["scheme"] - host = request_context["host"] - port = request_context["port"] - pool = self._new_pool(scheme, host, port, request_context=request_context) - self.pools[pool_key] = pool - - return pool - - def connection_from_url(self, url, pool_kwargs=None): - """ - Similar to :func:`urllib3.connectionpool.connection_from_url`. - - If ``pool_kwargs`` is not provided and a new pool needs to be - constructed, ``self.connection_pool_kw`` is used to initialize - the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` - is provided, it is used instead. Note that if a new pool does not - need to be created for the request, the provided ``pool_kwargs`` are - not used. - """ - u = parse_url(url) - return self.connection_from_host( - u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs - ) - - def _merge_pool_kwargs(self, override): - """ - Merge a dictionary of override values for self.connection_pool_kw. - - This does not modify self.connection_pool_kw and returns a new dict. - Any keys in the override dictionary with a value of ``None`` are - removed from the merged dictionary. - """ - base_pool_kwargs = self.connection_pool_kw.copy() - if override: - for key, value in override.items(): - if value is None: - try: - del base_pool_kwargs[key] - except KeyError: - pass - else: - base_pool_kwargs[key] = value - return base_pool_kwargs - - def _proxy_requires_url_absolute_form(self, parsed_url): - """ - Indicates if the proxy requires the complete destination URL in the - request. Normally this is only needed when not using an HTTP CONNECT - tunnel. - """ - if self.proxy is None: - return False - - return not connection_requires_http_tunnel( - self.proxy, self.proxy_config, parsed_url.scheme - ) - - def _validate_proxy_scheme_url_selection(self, url_scheme): - """ - Validates that were not attempting to do TLS in TLS connections on - Python2 or with unsupported SSL implementations. - """ - if self.proxy is None or url_scheme != "https": - return - - if self.proxy.scheme != "https": - return - - if six.PY2 and not self.proxy_config.use_forwarding_for_https: - raise ProxySchemeUnsupported( - "Contacting HTTPS destinations through HTTPS proxies " - "'via CONNECT tunnels' is not supported in Python 2" - ) - - def urlopen(self, method, url, redirect=True, **kw): - """ - Same as :meth:`urllib3.HTTPConnectionPool.urlopen` - with custom cross-host redirect logic and only sends the request-uri - portion of the ``url``. - - The given ``url`` parameter must be absolute, such that an appropriate - :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. 
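        A minimal, hypothetical sketch (not part of the original docstring)
        of the cross-host redirect behaviour implemented below; the URL is
        illustrative::

            >>> http = PoolManager()
            >>> # A 3xx response pointing at another host causes urlopen()
            >>> # to be re-invoked on the absolute redirect location, with
            >>> # retries decremented and unsafe headers stripped first.
            >>> r = http.urlopen('GET', 'http://example.com/old-path')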
- """ - u = parse_url(url) - self._validate_proxy_scheme_url_selection(u.scheme) - - conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) - - kw["assert_same_host"] = False - kw["redirect"] = False - - if "headers" not in kw: - kw["headers"] = self.headers.copy() - - if self._proxy_requires_url_absolute_form(u): - response = conn.urlopen(method, url, **kw) - else: - response = conn.urlopen(method, u.request_uri, **kw) - - redirect_location = redirect and response.get_redirect_location() - if not redirect_location: - return response - - # Support relative URLs for redirecting. - redirect_location = urljoin(url, redirect_location) - - # RFC 7231, Section 6.4.4 - if response.status == 303: - method = "GET" - - retries = kw.get("retries") - if not isinstance(retries, Retry): - retries = Retry.from_int(retries, redirect=redirect) - - # Strip headers marked as unsafe to forward to the redirected location. - # Check remove_headers_on_redirect to avoid a potential network call within - # conn.is_same_host() which may use socket.gethostbyname() in the future. - if retries.remove_headers_on_redirect and not conn.is_same_host( - redirect_location - ): - headers = list(six.iterkeys(kw["headers"])) - for header in headers: - if header.lower() in retries.remove_headers_on_redirect: - kw["headers"].pop(header, None) - - try: - retries = retries.increment(method, url, response=response, _pool=conn) - except MaxRetryError: - if retries.raise_on_redirect: - response.drain_conn() - raise - return response - - kw["retries"] = retries - kw["redirect"] = redirect - - log.info("Redirecting %s -> %s", url, redirect_location) - - response.drain_conn() - return self.urlopen(method, redirect_location, **kw) - - -class ProxyManager(PoolManager): - """ - Behaves just like :class:`PoolManager`, but sends all requests through - the defined proxy, using the CONNECT method for HTTPS URLs. - - :param proxy_url: - The URL of the proxy to be used. - - :param proxy_headers: - A dictionary containing headers that will be sent to the proxy. In case - of HTTP they are being sent with each request, while in the - HTTPS/CONNECT case they are sent only once. Could be used for proxy - authentication. - - :param proxy_ssl_context: - The proxy SSL context is used to establish the TLS connection to the - proxy when using HTTPS proxies. - - :param use_forwarding_for_https: - (Defaults to False) If set to True will forward requests to the HTTPS - proxy to be made on behalf of the client instead of creating a TLS - tunnel via the CONNECT method. **Enabling this flag means that request - and response headers and content will be visible from the HTTPS proxy** - whereas tunneling keeps request and response headers and content - private. IP address, target hostname, SNI, and port are always visible - to an HTTPS proxy even when this flag is disabled. 
- - Example: - >>> proxy = urllib3.ProxyManager('http://localhost:3128/') - >>> r1 = proxy.request('GET', 'http://google.com/') - >>> r2 = proxy.request('GET', 'http://httpbin.org/') - >>> len(proxy.pools) - 1 - >>> r3 = proxy.request('GET', 'https://httpbin.org/') - >>> r4 = proxy.request('GET', 'https://twitter.com/') - >>> len(proxy.pools) - 3 - - """ - - def __init__( - self, - proxy_url, - num_pools=10, - headers=None, - proxy_headers=None, - proxy_ssl_context=None, - use_forwarding_for_https=False, - **connection_pool_kw - ): - - if isinstance(proxy_url, HTTPConnectionPool): - proxy_url = "%s://%s:%i" % ( - proxy_url.scheme, - proxy_url.host, - proxy_url.port, - ) - proxy = parse_url(proxy_url) - - if proxy.scheme not in ("http", "https"): - raise ProxySchemeUnknown(proxy.scheme) - - if not proxy.port: - port = port_by_scheme.get(proxy.scheme, 80) - proxy = proxy._replace(port=port) - - self.proxy = proxy - self.proxy_headers = proxy_headers or {} - self.proxy_ssl_context = proxy_ssl_context - self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https) - - connection_pool_kw["_proxy"] = self.proxy - connection_pool_kw["_proxy_headers"] = self.proxy_headers - connection_pool_kw["_proxy_config"] = self.proxy_config - - super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw) - - def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): - if scheme == "https": - return super(ProxyManager, self).connection_from_host( - host, port, scheme, pool_kwargs=pool_kwargs - ) - - return super(ProxyManager, self).connection_from_host( - self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs - ) - - def _set_proxy_headers(self, url, headers=None): - """ - Sets headers needed by proxies: specifically, the Accept and Host - headers. Only sets headers not provided by the user. - """ - headers_ = {"Accept": "*/*"} - - netloc = parse_url(url).netloc - if netloc: - headers_["Host"] = netloc - - if headers: - headers_.update(headers) - return headers_ - - def urlopen(self, method, url, redirect=True, **kw): - "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." - u = parse_url(url) - if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme): - # For connections using HTTP CONNECT, httplib sets the necessary - # headers on the CONNECT to the proxy. If we're not using CONNECT, - # we'll definitely need to set 'Host' at the very least. 
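            # Added note (an illustration, not from the original source): in
            # the forwarding case the proxy receives the absolute URL in the
            # request line, e.g. "GET http://example.com/ HTTP/1.1", which is
            # why _set_proxy_headers() fills in Host and Accept below instead
            # of relying on a CONNECT handshake.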
- headers = kw.get("headers", self.headers) - kw["headers"] = self._set_proxy_headers(url, headers) - - return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) - - -def proxy_from_url(url, **kw): - return ProxyManager(proxy_url=url, **kw) diff --git a/spaces/Avkash/Satellite_Segmentation_Prediction/README.md b/spaces/Avkash/Satellite_Segmentation_Prediction/README.md deleted file mode 100644 index d8e94a650966595764c2a3701c40b0c714250789..0000000000000000000000000000000000000000 --- a/spaces/Avkash/Satellite_Segmentation_Prediction/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Satellite Segmentation Prediction -emoji: 💻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.8 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Awesimo/jojogan/e4e/options/__init__.py b/spaces/Awesimo/jojogan/e4e/options/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/__init__.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/__init__.py deleted file mode 100644 index 6b0668157052ce7b796ef50bc7ee85361e7605b9..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -from .build import META_ARCH_REGISTRY, build_model # isort:skip - -from .panoptic_fpn import PanopticFPN - -# import all the meta_arch, so they will be registered -from .rcnn import GeneralizedRCNN, ProposalNetwork -from .dense_detector import DenseDetector -from .retinanet import RetinaNet -from .fcos import FCOS -from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head - - -__all__ = list(globals().keys()) diff --git a/spaces/AymanKUMA/Speech-Bubbles-detector/app.py b/spaces/AymanKUMA/Speech-Bubbles-detector/app.py deleted file mode 100644 index 6209e7c842f41e87e27de4bcc8487bf43469e140..0000000000000000000000000000000000000000 --- a/spaces/AymanKUMA/Speech-Bubbles-detector/app.py +++ /dev/null @@ -1,214 +0,0 @@ -from ultralytics import YOLO -import streamlit as st -from PIL import Image - -if __name__ == '__main__': - - st.set_page_config(layout="wide") - - myhtml = ''' - - -
-
-

Speech Bubble Detector

-

Revolutionize Manga and Comic Reading Experience with YOLOv8 -
The Cutting-Edge Model That Detects Speech Bubbles with Unmatched Precision

-
-
- ''' - - style = ''' - - ''' - - st.markdown(style, unsafe_allow_html=True) - st.markdown(myhtml, unsafe_allow_html=True) - col1, col2, col3 = st.columns((2, 1, 1)) - - with col1: - st.title('Speech bubble detection ') - uploaded_file = st.file_uploader("Load image", type=["jpg", "jpeg", "png"]) - - if uploaded_file is not None: - is_valid = True - with st.spinner(text='Uploading image...'): - with col2: - st.image(uploaded_file, caption="Input Page", use_column_width=True) - picture = Image.open(uploaded_file) - else: - is_valid = False - if is_valid: - with col3: - with st.spinner(text='Processing image...'): - model = YOLO('best.pt') - results = model.predict(task="detect", source=picture, conf=0.85) - img = results[0].plot() - st.image(img, caption="Detected Objects", use_column_width=True) - - my2ndHtml = ''' -
-
-
-

About

-

Our model detects speech bubbles from manga and comics using YOLOv8 by ultralytics. With a custom dataset of 2000 images, our model is able to accurately detect and classify speech bubbles in a wide range of styles and formats.

-

Speech bubbles are an essential part of comic books and manga, allowing characters to speak and express emotions. Our model makes it easy to extract speech bubbles from images, making it a valuable tool for researchers, artists, and publishers alike.

-

This model is for academic use ONLY. Do not use it for any commercial purpose.

-
-
-

Examples

- -
-
-
- ''' - st.markdown(my2ndHtml, unsafe_allow_html=True) - my3rdHtml = '''''' - st.markdown(my3rdHtml, unsafe_allow_html=True) \ No newline at end of file diff --git a/spaces/Bart92/RVC_HF/infer/modules/vc/modules.py b/spaces/Bart92/RVC_HF/infer/modules/vc/modules.py deleted file mode 100644 index 458cfbe860b23bdd8f07abc2934443e6b8b01c3a..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/infer/modules/vc/modules.py +++ /dev/null @@ -1,526 +0,0 @@ -import os, sys -import traceback -import logging -now_dir = os.getcwd() -sys.path.append(now_dir) -logger = logging.getLogger(__name__) -import lib.globals.globals as rvc_globals -import numpy as np -import soundfile as sf -import torch -from io import BytesIO -from infer.lib.audio import load_audio -from infer.lib.audio import wav2 -from infer.lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from infer.modules.vc.pipeline import Pipeline -from infer.modules.vc.utils import * -import time -import scipy.io.wavfile as wavfile - -def note_to_hz(note_name): - SEMITONES = {'C': -9, 'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2} - pitch_class, octave = note_name[:-1], int(note_name[-1]) - semitone = SEMITONES[pitch_class] - note_number = 12 * (octave - 4) + semitone - frequency = 440.0 * (2.0 ** (1.0/12)) ** note_number - return frequency - -class VC: - def __init__(self, config): - self.n_spk = None - self.tgt_sr = None - self.net_g = None - self.pipeline = None - self.cpt = None - self.version = None - self.if_f0 = None - self.version = None - self.hubert_model = None - - self.config = config - - def get_vc(self, sid, *to_return_protect): - logger.info("Get sid: " + sid) - - to_return_protect0 = { - "visible": self.if_f0 != 0, - "value": to_return_protect[0] - if self.if_f0 != 0 and to_return_protect - else 0.5, - "__type__": "update", - } - to_return_protect1 = { - "visible": self.if_f0 != 0, - "value": to_return_protect[1] - if self.if_f0 != 0 and to_return_protect - else 0.33, - "__type__": "update", - } - - if not sid: - if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 - logger.info("Clean model cache") - del ( - self.net_g, - self.n_spk, - self.vc, - self.hubert_model, - self.tgt_sr, - ) # ,cpt - self.hubert_model = ( - self.net_g - ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None - if torch.cuda.is_available(): - torch.cuda.empty_cache() - ###楼下不这么折腾清理不干净 - self.if_f0 = self.cpt.get("f0", 1) - self.version = self.cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *self.cpt["config"], is_half=self.config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *self.cpt["config"], is_half=self.config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"]) - del self.net_g, self.cpt - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return ( - {"visible": False, "__type__": "update"}, - { - "visible": True, - "value": to_return_protect0, - "__type__": "update", - }, - { - "visible": True, - "value": to_return_protect1, - "__type__": "update", - }, - "", - "", - ) - #person = f'{os.getenv("weight_root")}/{sid}' - person = f'{sid}' - #logger.info(f"Loading: {person}") - logger.info(f"Loading...") - 
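        # Added note (descriptive, not from the original source): the RVC
        # checkpoint loaded below is a dict with at least "config" (model
        # hyperparameters; its last entry is the target sample rate),
        # "weight" (the state dict; "emb_g.weight" holds one row per
        # speaker), and optional "f0" / "version" flags that select the
        # synthesizer class.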
self.cpt = torch.load(person, map_location="cpu") - self.tgt_sr = self.cpt["config"][-1] - self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk - self.if_f0 = self.cpt.get("f0", 1) - self.version = self.cpt.get("version", "v1") - - synthesizer_class = { - ("v1", 1): SynthesizerTrnMs256NSFsid, - ("v1", 0): SynthesizerTrnMs256NSFsid_nono, - ("v2", 1): SynthesizerTrnMs768NSFsid, - ("v2", 0): SynthesizerTrnMs768NSFsid_nono, - } - - self.net_g = synthesizer_class.get( - (self.version, self.if_f0), SynthesizerTrnMs256NSFsid - )(*self.cpt["config"], is_half=self.config.is_half) - - del self.net_g.enc_q - - self.net_g.load_state_dict(self.cpt["weight"], strict=False) - self.net_g.eval().to(self.config.device) - if self.config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - - self.pipeline = Pipeline(self.tgt_sr, self.config) - n_spk = self.cpt["config"][-3] - index = {"value": get_index_path_from_model(sid), "__type__": "update"} - logger.info("Select index: " + index["value"]) - - return ( - ( - {"visible": False, "maximum": n_spk, "__type__": "update"}, - to_return_protect0, - to_return_protect1 - ) - if to_return_protect - else {"visible": False, "maximum": n_spk, "__type__": "update"} - ) - - - def vc_single( - self, - sid, - input_audio_path0, - input_audio_path1, - f0_up_key, - f0_file, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - crepe_hop_length, - f0_min, - note_min, - f0_max, - note_max, - f0_autotune, - ): - global total_time - total_time = 0 - start_time = time.time() - if not input_audio_path0 and not input_audio_path1: - return "You need to upload an audio", None - - if (not os.path.exists(input_audio_path0)) and (not os.path.exists(os.path.join(now_dir, input_audio_path0))): - return "Audio was not properly selected or doesn't exist", None - - input_audio_path1 = input_audio_path1 or input_audio_path0 - print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'") - print("-------------------") - f0_up_key = int(f0_up_key) - if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': - f0_min = note_to_hz(note_min) if note_min else 50 - f0_max = note_to_hz(note_max) if note_max else 1100 - print(f"Converted Min pitch: freq - {f0_min}\n" - f"Converted Max pitch: freq - {f0_max}") - else: - f0_min = f0_min or 50 - f0_max = f0_max or 1100 - try: - input_audio_path1 = input_audio_path1 or input_audio_path0 - print(f"Attempting to load {input_audio_path1}....") - audio = load_audio(file=input_audio_path1, - sr=16000, - DoFormant=rvc_globals.DoFormant, - Quefrency=rvc_globals.Quefrency, - Timbre=rvc_globals.Timbre) - - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - times = [0, 0, 0] - - if self.hubert_model is None: - self.hubert_model = load_hubert(self.config) - - try: - self.if_f0 = self.cpt.get("f0", 1) - except NameError: - message = "Model was not properly selected" - print(message) - return message, None - - file_index = ( - ( - file_index.strip(" ") - .strip('"') - .strip("\n") - .strip('"') - .strip(" ") - .replace("trained", "added") - ) - if file_index != "" - else file_index2 - ) # 防止小白写错,自动帮他替换掉 - - try: - audio_opt = self.pipeline.pipeline( - self.hubert_model, - self.net_g, - sid, - audio, - input_audio_path1, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - self.if_f0, - filter_radius, - self.tgt_sr, - resample_sr, - rms_mix_rate, - self.version, - protect, - crepe_hop_length, - 
f0_autotune, - f0_file=f0_file, - f0_min=f0_min, - f0_max=f0_max - ) - except AssertionError: - message = "Mismatching index version detected (v1 with v2, or v2 with v1)." - print(message) - return message, None - except NameError: - message = "RVC libraries are still loading. Please try again in a few seconds." - print(message) - return message, None - - if self.tgt_sr != resample_sr >= 16000: - self.tgt_sr = resample_sr - index_info = ( - "Index:\n%s." % file_index - if os.path.exists(file_index) - else "Index not used." - ) - end_time = time.time() - total_time = end_time - start_time - - output_folder = "audio-outputs" - os.makedirs(output_folder, exist_ok=True) - output_filename = "generated_audio_{}.wav" - output_count = 1 - while True: - current_output_path = os.path.join(output_folder, output_filename.format(output_count)) - if not os.path.exists(current_output_path): - break - output_count += 1 - - wavfile.write(current_output_path, self.tgt_sr, audio_opt) - print(f"Generated audio saved to: {current_output_path}") - return f"Success.\n {index_info}\nTime:\n npy:{times[0]}, f0:{times[1]}, infer:{times[2]}\nTotal Time: {total_time} seconds", (self.tgt_sr, audio_opt) - except: - info = traceback.format_exc() - logger.warn(info) - return info, (None, None) - - def vc_single_dont_save( - self, - sid, - input_audio_path0, - input_audio_path1, - f0_up_key, - f0_file, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - crepe_hop_length, - f0_min, - note_min, - f0_max, - note_max, - f0_autotune, - ): - global total_time - total_time = 0 - start_time = time.time() - if not input_audio_path0 and not input_audio_path1: - return "You need to upload an audio", None - - if (not os.path.exists(input_audio_path0)) and (not os.path.exists(os.path.join(now_dir, input_audio_path0))): - return "Audio was not properly selected or doesn't exist", None - - input_audio_path1 = input_audio_path1 or input_audio_path0 - print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'") - print("-------------------") - f0_up_key = int(f0_up_key) - if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': - f0_min = note_to_hz(note_min) if note_min else 50 - f0_max = note_to_hz(note_max) if note_max else 1100 - print(f"Converted Min pitch: freq - {f0_min}\n" - f"Converted Max pitch: freq - {f0_max}") - else: - f0_min = f0_min or 50 - f0_max = f0_max or 1100 - try: - input_audio_path1 = input_audio_path1 or input_audio_path0 - print(f"Attempting to load {input_audio_path1}....") - audio = load_audio(file=input_audio_path1, - sr=16000, - DoFormant=rvc_globals.DoFormant, - Quefrency=rvc_globals.Quefrency, - Timbre=rvc_globals.Timbre) - - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - times = [0, 0, 0] - - if self.hubert_model is None: - self.hubert_model = load_hubert(self.config) - - try: - self.if_f0 = self.cpt.get("f0", 1) - except NameError: - message = "Model was not properly selected" - print(message) - return message, None - - file_index = ( - ( - file_index.strip(" ") - .strip('"') - .strip("\n") - .strip('"') - .strip(" ") - .replace("trained", "added") - ) - if file_index != "" - else file_index2 - ) # 防止小白写错,自动帮他替换掉 - - try: - audio_opt = self.pipeline.pipeline( - self.hubert_model, - self.net_g, - sid, - audio, - input_audio_path1, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - self.if_f0, - filter_radius, - self.tgt_sr, - resample_sr, - rms_mix_rate, - self.version, - protect, - 
crepe_hop_length, - f0_autotune, - f0_file=f0_file, - f0_min=f0_min, - f0_max=f0_max - ) - except AssertionError: - message = "Mismatching index version detected (v1 with v2, or v2 with v1)." - print(message) - return message, None - except NameError: - message = "RVC libraries are still loading. Please try again in a few seconds." - print(message) - return message, None - - if self.tgt_sr != resample_sr >= 16000: - self.tgt_sr = resample_sr - index_info = ( - "Index:\n%s." % file_index - if os.path.exists(file_index) - else "Index not used." - ) - end_time = time.time() - total_time = end_time - start_time - - return f"Success.\n {index_info}\nTime:\n npy:{times[0]}, f0:{times[1]}, infer:{times[2]}\nTotal Time: {total_time} seconds", (self.tgt_sr, audio_opt) - except: - info = traceback.format_exc() - logger.warn(info) - return info, (None, None) - - - def vc_multi( - self, - sid, - dir_path, - opt_root, - paths, - f0_up_key, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - format1, - crepe_hop_length, - f0_min, - note_min, - f0_max, - note_max, - f0_autotune, - ): - if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': - f0_min = note_to_hz(note_min) if note_min else 50 - f0_max = note_to_hz(note_max) if note_max else 1100 - print(f"Converted Min pitch: freq - {f0_min}\n" - f"Converted Max pitch: freq - {f0_max}") - else: - f0_min = f0_min or 50 - f0_max = f0_max or 1100 - try: - dir_path = ( - dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) # 防止小白拷路径头尾带了空格和"和回车 - opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - os.makedirs(opt_root, exist_ok=True) - try: - if dir_path != "": - paths = [ - os.path.join(dir_path, name) for name in os.listdir(dir_path) - ] - else: - paths = [path.name for path in paths] - except: - traceback.print_exc() - paths = [path.name for path in paths] - infos = [] - for path in paths: - info, opt = self.vc_single( - sid, - path, - f0_up_key, - None, - f0_method, - file_index, - file_index2, - # file_big_npy, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - ) - if "Success" in info: - try: - tgt_sr, audio_opt = opt - if format1 in ["wav", "flac"]: - sf.write( - "%s/%s.%s" - % (opt_root, os.path.basename(path), format1), - audio_opt, - tgt_sr, - ) - else: - path = "%s/%s.%s" % (opt_root, os.path.basename(path), format1) - with BytesIO() as wavf: - sf.write( - wavf, - audio_opt, - tgt_sr, - format="wav" - ) - wavf.seek(0, 0) - with open(path, "wb") as outf: - wav2(wavf, outf, format1) - except: - info += traceback.format_exc() - infos.append("%s->%s" % (os.path.basename(path), info)) - yield "\n".join(infos) - yield "\n".join(infos) - except: - yield traceback.format_exc() diff --git a/spaces/Bart92/RVC_HF/infer/modules/vc/utils.py b/spaces/Bart92/RVC_HF/infer/modules/vc/utils.py deleted file mode 100644 index a1cb0ff84097d1c7eb82373ccf19db061f595096..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/infer/modules/vc/utils.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import re -from fairseq import checkpoint_utils - - -def get_index_path_from_model(sid): - sid0strip = re.sub(r'\.pth|\.onnx$', '', sid) - sid0name = os.path.split(sid0strip)[-1] # Extract only the name, not the directory - - # Check if the sid0strip has the specific ending format _eXXX_sXXX - if re.match(r'.+_e\d+_s\d+$', sid0name): - base_model_name = sid0name.rsplit('_', 2)[0] - else: - base_model_name = sid0name - - return next( - ( - f - 
for f in [ - os.path.join(root, name) - for root, _, files in os.walk(os.getenv("index_root"), topdown=False) - for name in files - if name.endswith(".index") and "trained" not in name - ] - if base_model_name in f - ), - "", - ) - - -def load_hubert(config): - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["assets/hubert/hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - return hubert_model.eval() diff --git a/spaces/Benson/text-generation/Examples/Cmo Descargar El Juego Taxi Simulator.md b/spaces/Benson/text-generation/Examples/Cmo Descargar El Juego Taxi Simulator.md deleted file mode 100644 index 6d073a6c3853adda2b423afac6cd1f4043fa01db..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cmo Descargar El Juego Taxi Simulator.md +++ /dev/null @@ -1,80 +0,0 @@ -
How to Download a Taxi Simulator Game

If you love driving and want to experience the life of a taxi driver, you might be interested in playing taxi simulator games. These games let you drive around different cities, pick up passengers, complete missions, and earn money. In this article, we will show you what taxi simulator games are, why you should play them, how to choose the best one for you, how to download them on different devices, and how to play them. Let's get started!

What is a taxi simulator game?

A taxi simulator game is a type of driving game that simulates the job of a taxi driver. You can choose from a variety of cars, customize them, and drive them in realistic environments. You can also interact with customers, follow traffic rules, avoid accidents, and manage your business. Some examples of popular taxi simulator games are Taxi Sim 2022 Evolution, Taxi Life: A City Driving Simulator, and Crazy Taxi.

Why play taxi simulator games?

Taxi simulator games are fun and challenging for many reasons. Here are some of the benefits and features of playing them:

- You can explore different cities and landmarks, such as New York, Miami, Rome, Los Angeles, Barcelona, and more.
- You can learn about traffic rules, road signs, and driving etiquette in different countries.
- You can improve your driving skills, reflexes, and coordination.
- You can enjoy realistic graphics, sounds, and physics.
- You can customize your car with various options, such as color, engine, wheels, accessories, and more.
- You can earn money and upgrade your car or buy new ones.
- You can compete with other players online or offline.
- You can have fun with different kinds of customers, missions, and scenarios.

How to choose the best taxi simulator game for you?

When choosing a taxi simulator game, consider the following factors:

- The platform: choose a game that is compatible with your device, whether PC, Android, or iOS.
- The genre: choose a game that matches your preference, whether realistic, arcade, casual, or simulation.
- The rating: choose a game with good reviews and ratings from other users and critics.
- The features: choose a game that has the features you want, such as multiplayer mode, customization options, a variety of cars and cities, and so on.
- The price: choose a game that fits your budget, whether free or paid.

How to download taxi simulator games on different devices?

PC

If you want to play taxi simulator games on your PC, you have two options:

1. Download them from official websites or online stores. For example, you can download Taxi Life: A City Driving Simulator from Steam or Crazy Taxi from Sega.
2. Download them from third-party websites or torrents. This option is risky and illegal, however, since you could download viruses, malware, or pirated games. We do not recommend it, and if you use it anyway, you do so at your own risk.

Android

If you want to play taxi simulator games on your Android device, you have two options:

1. Download them from the Google Play Store. For example, you can download Taxi Sim 2022 Evolution or Taxi Driver 3D: Hill Station from there.
2. Download them from other sources. For example, you can download Taxi Game 2 from APKMonk. This option is also risky, however, and could expose your device to harmful apps or viruses. We suggest checking the permissions and reviews before installing any app from an unknown source.

iOS

If you want to play taxi simulator games on your iOS device, you have one main option:

1. Download them from the App Store. For example, you can download Taxi Sim 2020 or Taxi Game 2 from there.

How to play taxi simulator games?

Once you have downloaded and installed your chosen taxi simulator game, you can start playing it. Here are some tips and tricks to help you enjoy the game:

- Read the instructions and tutorials carefully. They will teach you the basic controls, objectives, and features of the game.
- Choose your car and customize it to your liking. You can change the color, engine, wheels, accessories, and more.
- Select your city and mode. You can choose from different cities and landmarks, such as New York, Miami, Rome, Los Angeles, Barcelona, and more. You can also choose from different modes, such as career, free roam, multiplayer, and so on.
- Drive around the city and pick up passengers. You can use the map and GPS to find your destination. Remember to follow the traffic rules and avoid accidents.
- Earn money and reputation. You can earn money by completing missions, delivering customers, and performing stunts. You can also build a reputation by being a good driver and earning positive feedback and ratings from customers.
- Upgrade your car or buy new ones. You can spend the money you earn on upgrades or new cars, and you can unlock new cars by completing achievements or challenges.
- Compete with other players online or offline. You can join online races or challenges with players from around the world, or play offline with friends and family in local multiplayer mode.

Conclusion

Taxi simulator games are a great way to experience the life of a taxi driver, and they are fun and challenging for anyone who loves driving. In this article, we showed you what taxi simulator games are, why you should play them, how to choose the best one for you, how to download them on different devices, and how to play them. We hope you found this article helpful and informative. Now that you know how to download taxi simulator games, why not give one a try?

FAQ

What are the best taxi simulator games?

The answer depends on your personal preferences and tastes. However, some of the most popular and highly rated taxi simulator games are Taxi Sim 2022 Evolution, Taxi Life: A City Driving Simulator, and Crazy Taxi.

How much do taxi simulator games cost?

The cost varies depending on the platform and the game itself. Some games are free to download and play but may contain ads or in-app purchases. Others are paid and require a one-time payment or a subscription fee. You can check the price of a game on the platform you are using, such as the Google Play Store, the App Store, Steam, and so on.

Are taxi simulator games realistic?

It varies from game to game. Many titles aim for realistic graphics, sounds, physics, and traffic rules, while arcade-style games such as Crazy Taxi favor fast, exaggerated fun over accuracy. How realistic the experience feels depends on the game and the genre you choose.

Can I play taxi simulator games online with friends?

Yes. Some games have a multiplayer mode that lets you join online races or challenges with other players from around the world; you can also chat with them, send them messages, or invite them to your game. Some games also have a local multiplayer mode that lets you play offline with friends or family using the same device or a local network.

How can I improve my taxi driving skills in taxi simulator games?

If you want to improve your taxi driving skills in taxi simulator games, here are some tips that may help:

- Practice regularly. The more you play, the more you will learn and improve.
- Watch tutorials and guides. Many videos and articles online teach you how to play taxi simulator games better.
- Learn from your mistakes. Review your performance and see what you did wrong or could do better.
- Challenge yourself. Try different modes, levels, missions, and scenarios that test your skills and abilities.
- Have fun. Don't take the game too seriously or stress over it. Enjoy the game and have fun with it.
-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/session.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/session.py deleted file mode 100644 index 444f60e8b7e2b823c39bfceb0846b6418cb831ed..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/session.py +++ /dev/null @@ -1,1229 +0,0 @@ -# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ -# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. -""" -This module contains the main interface to the botocore package, the -Session object. -""" - -import copy -import logging -import os -import platform -import socket -import warnings - -import botocore.client -import botocore.configloader -import botocore.credentials -import botocore.tokens -from botocore import ( - UNSIGNED, - __version__, - handlers, - invoke_initializers, - monitoring, - paginate, - retryhandler, - translate, - waiter, -) -from botocore.compat import HAS_CRT, MutableMapping -from botocore.configprovider import ( - BOTOCORE_DEFAUT_SESSION_VARIABLES, - ConfigChainFactory, - ConfigValueStore, - DefaultConfigResolver, - SmartDefaultsConfigStoreFactory, - create_botocore_default_config_mapping, -) -from botocore.errorfactory import ClientExceptionsFactory -from botocore.exceptions import ( - ConfigNotFound, - InvalidDefaultsMode, - PartialCredentialsError, - ProfileNotFound, - UnknownServiceError, -) -from botocore.hooks import ( - EventAliaser, - HierarchicalEmitter, - first_non_none_response, -) -from botocore.loaders import create_loader -from botocore.model import ServiceModel -from botocore.parsers import ResponseParserFactory -from botocore.regions import EndpointResolver -from botocore.utils import ( - EVENT_ALIASES, - IMDSRegionProvider, - validate_region_name, -) - -logger = logging.getLogger(__name__) - - -class Session: - """ - The Session object collects together useful functionality - from `botocore` as well as important data such as configuration - information and credentials into a single, easy-to-use object. - - :ivar available_profiles: A list of profiles defined in the config - file associated with this session. - :ivar profile: The current profile. - """ - - SESSION_VARIABLES = copy.copy(BOTOCORE_DEFAUT_SESSION_VARIABLES) - - #: The default format string to use when configuring the botocore logger. - LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' - - def __init__( - self, - session_vars=None, - event_hooks=None, - include_builtin_handlers=True, - profile=None, - ): - """ - Create a new Session object. - - :type session_vars: dict - :param session_vars: A dictionary that is used to override some or all - of the environment variables associated with this session. The - key/value pairs defined in this dictionary will override the - corresponding variables defined in ``SESSION_VARIABLES``. - - :type event_hooks: BaseEventHooks - :param event_hooks: The event hooks object to use. 
If one is not - provided, an event hooks object will be automatically created - for you. - - :type include_builtin_handlers: bool - :param include_builtin_handlers: Indicates whether or not to - automatically register builtin handlers. - - :type profile: str - :param profile: The name of the profile to use for this - session. Note that the profile can only be set when - the session is created. - - """ - if event_hooks is None: - self._original_handler = HierarchicalEmitter() - else: - self._original_handler = event_hooks - self._events = EventAliaser(self._original_handler) - if include_builtin_handlers: - self._register_builtin_handlers(self._events) - self.user_agent_name = 'Botocore' - self.user_agent_version = __version__ - self.user_agent_extra = '' - # The _profile attribute is just used to cache the value - # of the current profile to avoid going through the normal - # config lookup process each access time. - self._profile = None - self._config = None - self._credentials = None - self._auth_token = None - self._profile_map = None - # This is a dict that stores per session specific config variable - # overrides via set_config_variable(). - self._session_instance_vars = {} - if profile is not None: - self._session_instance_vars['profile'] = profile - self._client_config = None - self._last_client_region_used = None - self._components = ComponentLocator() - self._internal_components = ComponentLocator() - self._register_components() - self.session_var_map = SessionVarDict(self, self.SESSION_VARIABLES) - if session_vars is not None: - self.session_var_map.update(session_vars) - invoke_initializers(self) - - def _register_components(self): - self._register_credential_provider() - self._register_token_provider() - self._register_data_loader() - self._register_endpoint_resolver() - self._register_event_emitter() - self._register_response_parser_factory() - self._register_exceptions_factory() - self._register_config_store() - self._register_monitor() - self._register_default_config_resolver() - self._register_smart_defaults_factory() - - def _register_event_emitter(self): - self._components.register_component('event_emitter', self._events) - - def _register_token_provider(self): - self._components.lazy_register_component( - 'token_provider', self._create_token_resolver - ) - - def _create_token_resolver(self): - return botocore.tokens.create_token_resolver(self) - - def _register_credential_provider(self): - self._components.lazy_register_component( - 'credential_provider', self._create_credential_resolver - ) - - def _create_credential_resolver(self): - return botocore.credentials.create_credential_resolver( - self, region_name=self._last_client_region_used - ) - - def _register_data_loader(self): - self._components.lazy_register_component( - 'data_loader', - lambda: create_loader(self.get_config_variable('data_path')), - ) - - def _register_endpoint_resolver(self): - def create_default_resolver(): - loader = self.get_component('data_loader') - endpoints, path = loader.load_data_with_path('endpoints') - uses_builtin = loader.is_builtin_path(path) - return EndpointResolver(endpoints, uses_builtin_data=uses_builtin) - - self._internal_components.lazy_register_component( - 'endpoint_resolver', create_default_resolver - ) - - def _register_default_config_resolver(self): - def create_default_config_resolver(): - loader = self.get_component('data_loader') - defaults = loader.load_data('sdk-default-configuration') - return DefaultConfigResolver(defaults) - - 
self._internal_components.lazy_register_component( - 'default_config_resolver', create_default_config_resolver - ) - - def _register_smart_defaults_factory(self): - def create_smart_defaults_factory(): - default_config_resolver = self._get_internal_component( - 'default_config_resolver' - ) - imds_region_provider = IMDSRegionProvider(session=self) - return SmartDefaultsConfigStoreFactory( - default_config_resolver, imds_region_provider - ) - - self._internal_components.lazy_register_component( - 'smart_defaults_factory', create_smart_defaults_factory - ) - - def _register_response_parser_factory(self): - self._components.register_component( - 'response_parser_factory', ResponseParserFactory() - ) - - def _register_exceptions_factory(self): - self._internal_components.register_component( - 'exceptions_factory', ClientExceptionsFactory() - ) - - def _register_builtin_handlers(self, events): - for spec in handlers.BUILTIN_HANDLERS: - if len(spec) == 2: - event_name, handler = spec - self.register(event_name, handler) - else: - event_name, handler, register_type = spec - if register_type is handlers.REGISTER_FIRST: - self._events.register_first(event_name, handler) - elif register_type is handlers.REGISTER_LAST: - self._events.register_last(event_name, handler) - - def _register_config_store(self): - config_store_component = ConfigValueStore( - mapping=create_botocore_default_config_mapping(self) - ) - self._components.register_component( - 'config_store', config_store_component - ) - - def _register_monitor(self): - self._internal_components.lazy_register_component( - 'monitor', self._create_csm_monitor - ) - - def _create_csm_monitor(self): - if self.get_config_variable('csm_enabled'): - client_id = self.get_config_variable('csm_client_id') - host = self.get_config_variable('csm_host') - port = self.get_config_variable('csm_port') - handler = monitoring.Monitor( - adapter=monitoring.MonitorEventAdapter(), - publisher=monitoring.SocketPublisher( - socket=socket.socket(socket.AF_INET, socket.SOCK_DGRAM), - host=host, - port=port, - serializer=monitoring.CSMSerializer( - csm_client_id=client_id - ), - ), - ) - return handler - return None - - def _get_crt_version(self): - try: - import awscrt - - return awscrt.__version__ - except AttributeError: - return "Unknown" - - @property - def available_profiles(self): - return list(self._build_profile_map().keys()) - - def _build_profile_map(self): - # This will build the profile map if it has not been created, - # otherwise it will return the cached value. The profile map - # is a list of profile names, to the config values for the profile. - if self._profile_map is None: - self._profile_map = self.full_config['profiles'] - return self._profile_map - - @property - def profile(self): - if self._profile is None: - profile = self.get_config_variable('profile') - self._profile = profile - return self._profile - - def get_config_variable(self, logical_name, methods=None): - if methods is not None: - return self._get_config_variable_with_custom_methods( - logical_name, methods - ) - return self.get_component('config_store').get_config_variable( - logical_name - ) - - def _get_config_variable_with_custom_methods(self, logical_name, methods): - # If a custom list of methods was supplied we need to perserve the - # behavior with the new system. To do so a new chain that is a copy of - # the old one will be constructed, but only with the supplied methods - # being added to the chain. This chain will be consulted for a value - # and then thrown out. 
This is not efficient, nor is the methods arg - # used in botocore, this is just for backwards compatibility. - chain_builder = SubsetChainConfigFactory(session=self, methods=methods) - mapping = create_botocore_default_config_mapping(self) - for name, config_options in self.session_var_map.items(): - config_name, env_vars, default, typecast = config_options - build_chain_config_args = { - 'conversion_func': typecast, - 'default': default, - } - if 'instance' in methods: - build_chain_config_args['instance_name'] = name - if 'env' in methods: - build_chain_config_args['env_var_names'] = env_vars - if 'config' in methods: - build_chain_config_args['config_property_name'] = config_name - mapping[name] = chain_builder.create_config_chain( - **build_chain_config_args - ) - config_store_component = ConfigValueStore(mapping=mapping) - value = config_store_component.get_config_variable(logical_name) - return value - - def set_config_variable(self, logical_name, value): - """Set a configuration variable to a specific value. - - By using this method, you can override the normal lookup - process used in ``get_config_variable`` by explicitly setting - a value. Subsequent calls to ``get_config_variable`` will - use the ``value``. This gives you per-session specific - configuration values. - - :: - >>> # Assume logical name 'foo' maps to env var 'FOO' - >>> os.environ['FOO'] = 'myvalue' - >>> s.get_config_variable('foo') - 'myvalue' - >>> s.set_config_variable('foo', 'othervalue') - >>> s.get_config_variable('foo') - 'othervalue' - - :type logical_name: str - :param logical_name: The logical name of the session variable - you want to set. These are the keys in ``SESSION_VARIABLES``. - :param value: The value to associate with the config variable. - - """ - logger.debug( - "Setting config variable for %s to %r", - logical_name, - value, - ) - self._session_instance_vars[logical_name] = value - - def instance_variables(self): - return copy.copy(self._session_instance_vars) - - def get_scoped_config(self): - """ - Returns the config values from the config file scoped to the current - profile. - - The configuration data is loaded **only** from the config file. - It does not resolve variables based on different locations - (e.g. first from the session instance, then from environment - variables, then from the config file). If you want this lookup - behavior, use the ``get_config_variable`` method instead. - - Note that this configuration is specific to a single profile (the - ``profile`` session variable). - - If the ``profile`` session variable is set and the profile does - not exist in the config file, a ``ProfileNotFound`` exception - will be raised. - - :raises: ConfigNotFound, ConfigParseError, ProfileNotFound - :rtype: dict - - """ - profile_name = self.get_config_variable('profile') - profile_map = self._build_profile_map() - # If a profile is not explicitly set return the default - # profile config or an empty config dict if we don't have - # a default profile. - if profile_name is None: - return profile_map.get('default', {}) - elif profile_name not in profile_map: - # Otherwise if they specified a profile, it has to - # exist (even if it's the default profile) otherwise - # we complain. - raise ProfileNotFound(profile=profile_name) - else: - return profile_map[profile_name] - - @property - def full_config(self): - """Return the parsed config file. - - The ``get_config`` method returns the config associated with the - specified profile. 
This property returns the contents of the - **entire** config file. - - :rtype: dict - """ - if self._config is None: - try: - config_file = self.get_config_variable('config_file') - self._config = botocore.configloader.load_config(config_file) - except ConfigNotFound: - self._config = {'profiles': {}} - try: - # Now we need to inject the profiles from the - # credentials file. We don't actually need the values - # in the creds file, only the profile names so that we - # can validate the user is not referring to a nonexistent - # profile. - cred_file = self.get_config_variable('credentials_file') - cred_profiles = botocore.configloader.raw_config_parse( - cred_file - ) - for profile in cred_profiles: - cred_vars = cred_profiles[profile] - if profile not in self._config['profiles']: - self._config['profiles'][profile] = cred_vars - else: - self._config['profiles'][profile].update(cred_vars) - except ConfigNotFound: - pass - return self._config - - def get_default_client_config(self): - """Retrieves the default config for creating clients - - :rtype: botocore.client.Config - :returns: The default client config object when creating clients. If - the value is ``None`` then there is no default config object - attached to the session. - """ - return self._client_config - - def set_default_client_config(self, client_config): - """Sets the default config for creating clients - - :type client_config: botocore.client.Config - :param client_config: The default client config object when creating - clients. If the value is ``None`` then there is no default config - object attached to the session. - """ - self._client_config = client_config - - def set_credentials(self, access_key, secret_key, token=None): - """ - Manually create credentials for this session. If you would - prefer to use botocore without a config file, environment variables, - or IAM roles, you can pass explicit credentials into this - method to establish credentials for this session. - - :type access_key: str - :param access_key: The access key part of the credentials. - - :type secret_key: str - :param secret_key: The secret key part of the credentials. - - :type token: str - :param token: An option session token used by STS session - credentials. - """ - self._credentials = botocore.credentials.Credentials( - access_key, secret_key, token - ) - - def get_credentials(self): - """ - Return the :class:`botocore.credential.Credential` object - associated with this session. If the credentials have not - yet been loaded, this will attempt to load them. If they - have already been loaded, this will return the cached - credentials. - - """ - if self._credentials is None: - self._credentials = self._components.get_component( - 'credential_provider' - ).load_credentials() - return self._credentials - - def get_auth_token(self): - """ - Return the :class:`botocore.tokens.AuthToken` object associated with - this session. If the authorization token has not yet been loaded, this - will attempt to load it. If it has already been loaded, this will - return the cached authorization token. - - """ - if self._auth_token is None: - provider = self._components.get_component('token_provider') - self._auth_token = provider.load_token() - return self._auth_token - - def user_agent(self): - """ - Return a string suitable for use as a User-Agent header. - The string will be of the form: - - / Python/ / - - Where: - - - agent_name is the value of the `user_agent_name` attribute - of the session object (`Botocore` by default). 
- - agent_version is the value of the `user_agent_version` - attribute of the session object (the botocore version by default). - by default. - - py_ver is the version of the Python interpreter beng used. - - plat_name is the name of the platform (e.g. Darwin) - - plat_ver is the version of the platform - - exec_env is exec-env/$AWS_EXECUTION_ENV - - If ``user_agent_extra`` is not empty, then this value will be - appended to the end of the user agent string. - - """ - base = ( - f'{self.user_agent_name}/{self.user_agent_version} ' - f'Python/{platform.python_version()} ' - f'{platform.system()}/{platform.release()}' - ) - if HAS_CRT: - base += ' awscrt/%s' % self._get_crt_version() - if os.environ.get('AWS_EXECUTION_ENV') is not None: - base += ' exec-env/%s' % os.environ.get('AWS_EXECUTION_ENV') - if self.user_agent_extra: - base += ' %s' % self.user_agent_extra - - return base - - def get_data(self, data_path): - """ - Retrieve the data associated with `data_path`. - - :type data_path: str - :param data_path: The path to the data you wish to retrieve. - """ - return self.get_component('data_loader').load_data(data_path) - - def get_service_model(self, service_name, api_version=None): - """Get the service model object. - - :type service_name: string - :param service_name: The service name - - :type api_version: string - :param api_version: The API version of the service. If none is - provided, then the latest API version will be used. - - :rtype: L{botocore.model.ServiceModel} - :return: The botocore service model for the service. - - """ - service_description = self.get_service_data(service_name, api_version) - return ServiceModel(service_description, service_name=service_name) - - def get_waiter_model(self, service_name, api_version=None): - loader = self.get_component('data_loader') - waiter_config = loader.load_service_model( - service_name, 'waiters-2', api_version - ) - return waiter.WaiterModel(waiter_config) - - def get_paginator_model(self, service_name, api_version=None): - loader = self.get_component('data_loader') - paginator_config = loader.load_service_model( - service_name, 'paginators-1', api_version - ) - return paginate.PaginatorModel(paginator_config) - - def get_service_data(self, service_name, api_version=None): - """ - Retrieve the fully merged data associated with a service. - """ - data_path = service_name - service_data = self.get_component('data_loader').load_service_model( - data_path, type_name='service-2', api_version=api_version - ) - service_id = EVENT_ALIASES.get(service_name, service_name) - self._events.emit( - 'service-data-loaded.%s' % service_id, - service_data=service_data, - service_name=service_name, - session=self, - ) - return service_data - - def get_available_services(self): - """ - Return a list of names of available services. - """ - return self.get_component('data_loader').list_available_services( - type_name='service-2' - ) - - def set_debug_logger(self, logger_name='botocore'): - """ - Convenience function to quickly configure full debug output - to go to the console. - """ - self.set_stream_logger(logger_name, logging.DEBUG) - - def set_stream_logger( - self, logger_name, log_level, stream=None, format_string=None - ): - """ - Convenience method to configure a stream logger. - - :type logger_name: str - :param logger_name: The name of the logger to configure - - :type log_level: str - :param log_level: The log level to set for the logger. This - is any param supported by the ``.setLevel()`` method of - a ``Log`` object. 
- - :type stream: file - :param stream: A file-like object to log to. If none is provided - then sys.stderr will be used. - - :type format_string: str - :param format_string: The format string to use for the log - formatter. If none is provided this will default to - ``self.LOG_FORMAT``. - - """ - log = logging.getLogger(logger_name) - log.setLevel(logging.DEBUG) - - ch = logging.StreamHandler(stream) - ch.setLevel(log_level) - - # create formatter - if format_string is None: - format_string = self.LOG_FORMAT - formatter = logging.Formatter(format_string) - - # add formatter to ch - ch.setFormatter(formatter) - - # add ch to logger - log.addHandler(ch) - - def set_file_logger(self, log_level, path, logger_name='botocore'): - """ - Convenience function to quickly configure any level of logging - to a file. - - :type log_level: int - :param log_level: A log level as specified in the `logging` module - - :type path: string - :param path: Path to the log file. The file will be created - if it doesn't already exist. - """ - log = logging.getLogger(logger_name) - log.setLevel(logging.DEBUG) - - # create console handler and set level to debug - ch = logging.FileHandler(path) - ch.setLevel(log_level) - - # create formatter - formatter = logging.Formatter(self.LOG_FORMAT) - - # add formatter to ch - ch.setFormatter(formatter) - - # add ch to logger - log.addHandler(ch) - - def register( - self, event_name, handler, unique_id=None, unique_id_uses_count=False - ): - """Register a handler with an event. - - :type event_name: str - :param event_name: The name of the event. - - :type handler: callable - :param handler: The callback to invoke when the event - is emitted. This object must be callable, and must - accept ``**kwargs``. If either of these preconditions are - not met, a ``ValueError`` will be raised. - - :type unique_id: str - :param unique_id: An optional identifier to associate with the - registration. A unique_id can only be used once for - the entire session registration (unless it is unregistered). - This can be used to prevent an event handler from being - registered twice. - - :type unique_id_uses_count: boolean - :param unique_id_uses_count: Specifies if the event should maintain - a count when a ``unique_id`` is registered and unregistered. The - event can only be completely unregistered once every register call - using the unique id has been matched by an ``unregister`` call. - If ``unique_id`` is specified, subsequent ``register`` - calls must use the same value for ``unique_id_uses_count`` - as the ``register`` call that first registered the event. - - :raises ValueError: If the call to ``register`` uses ``unique_id`` - but the value for ``unique_id_uses_count`` differs from the - ``unique_id_uses_count`` value declared by the very first - ``register`` call for that ``unique_id``. - """ - self._events.register( - event_name, - handler, - unique_id, - unique_id_uses_count=unique_id_uses_count, - ) - - def unregister( - self, - event_name, - handler=None, - unique_id=None, - unique_id_uses_count=False, - ): - """Unregister a handler with an event. - - :type event_name: str - :param event_name: The name of the event. - - :type handler: callable - :param handler: The callback to unregister. - - :type unique_id: str - :param unique_id: A unique identifier identifying the callback - to unregister. You can provide either the handler or the - unique_id, you do not have to provide both. 
- - :type unique_id_uses_count: boolean - :param unique_id_uses_count: Specifies if the event should maintain - a count when a ``unique_id`` is registered and unregistered. The - event can only be completely unregistered once every ``register`` - call using the ``unique_id`` has been matched by an ``unregister`` - call. If the ``unique_id`` is specified, subsequent - ``unregister`` calls must use the same value for - ``unique_id_uses_count`` as the ``register`` call that first - registered the event. - - :raises ValueError: If the call to ``unregister`` uses ``unique_id`` - but the value for ``unique_id_uses_count`` differs from the - ``unique_id_uses_count`` value declared by the very first - ``register`` call for that ``unique_id``. - """ - self._events.unregister( - event_name, - handler=handler, - unique_id=unique_id, - unique_id_uses_count=unique_id_uses_count, - ) - - def emit(self, event_name, **kwargs): - return self._events.emit(event_name, **kwargs) - - def emit_first_non_none_response(self, event_name, **kwargs): - responses = self._events.emit(event_name, **kwargs) - return first_non_none_response(responses) - - def get_component(self, name): - try: - return self._components.get_component(name) - except ValueError: - if name in ['endpoint_resolver', 'exceptions_factory']: - warnings.warn( - 'Fetching the %s component with the get_component() ' - 'method is deprecated as the component has always been ' - 'considered an internal interface of botocore' % name, - DeprecationWarning, - ) - return self._internal_components.get_component(name) - raise - - def _get_internal_component(self, name): - # While this method may be called by botocore classes outside of the - # Session, this method should **never** be used by a class that lives - # outside of botocore. - return self._internal_components.get_component(name) - - def _register_internal_component(self, name, component): - # While this method may be called by botocore classes outside of the - # Session, this method should **never** be used by a class that lives - # outside of botocore. - return self._internal_components.register_component(name, component) - - def register_component(self, name, component): - self._components.register_component(name, component) - - def lazy_register_component(self, name, component): - self._components.lazy_register_component(name, component) - - def create_client( - self, - service_name, - region_name=None, - api_version=None, - use_ssl=True, - verify=None, - endpoint_url=None, - aws_access_key_id=None, - aws_secret_access_key=None, - aws_session_token=None, - config=None, - ): - """Create a botocore client. - - :type service_name: string - :param service_name: The name of the service for which a client will - be created. You can use the ``Session.get_available_services()`` - method to get a list of all available service names. - - :type region_name: string - :param region_name: The name of the region associated with the client. - A client is associated with a single region. - - :type api_version: string - :param api_version: The API version to use. By default, botocore will - use the latest API version when creating a client. You only need - to specify this parameter if you want to use a previous API version - of the client. - - :type use_ssl: boolean - :param use_ssl: Whether or not to use SSL. By default, SSL is used. - Note that not all services support non-ssl connections. - - :type verify: boolean/string - :param verify: Whether or not to verify SSL certificates. 
- By default SSL certificates are verified. You can provide the - following values: - - * False - do not validate SSL certificates. SSL will still be - used (unless use_ssl is False), but SSL certificates - will not be verified. - * path/to/cert/bundle.pem - A filename of the CA cert bundle to - use. You can specify this argument if you want to use a - different CA cert bundle than the one used by botocore. - - :type endpoint_url: string - :param endpoint_url: The complete URL to use for the constructed - client. Normally, botocore will automatically construct the - appropriate URL to use when communicating with a service. You can - specify a complete URL (including the "http/https" scheme) to - override this behavior. If this value is provided, then - ``use_ssl`` is ignored. - - :type aws_access_key_id: string - :param aws_access_key_id: The access key to use when creating - the client. This is entirely optional, and if not provided, - the credentials configured for the session will automatically - be used. You only need to provide this argument if you want - to override the credentials used for this specific client. - - :type aws_secret_access_key: string - :param aws_secret_access_key: The secret key to use when creating - the client. Same semantics as aws_access_key_id above. - - :type aws_session_token: string - :param aws_session_token: The session token to use when creating - the client. Same semantics as aws_access_key_id above. - - :type config: botocore.client.Config - :param config: Advanced client configuration options. If a value - is specified in the client config, its value will take precedence - over environment variables and configuration values, but not over - a value passed explicitly to the method. If a default config - object is set on the session, the config object used when creating - the client will be the result of calling ``merge()`` on the - default config with the config provided to this call. - - :rtype: botocore.client.BaseClient - :return: A botocore client instance - - """ - default_client_config = self.get_default_client_config() - # If a config is provided and a default config is set, then - # use the config resulting from merging the two. - if config is not None and default_client_config is not None: - config = default_client_config.merge(config) - # If a config was not provided then use the default - # client config from the session - elif default_client_config is not None: - config = default_client_config - - region_name = self._resolve_region_name(region_name, config) - - # Figure out the verify value based on the various - # configuration options. 
- if verify is None: - verify = self.get_config_variable('ca_bundle') - - if api_version is None: - api_version = self.get_config_variable('api_versions').get( - service_name, None - ) - - loader = self.get_component('data_loader') - event_emitter = self.get_component('event_emitter') - response_parser_factory = self.get_component('response_parser_factory') - if config is not None and config.signature_version is UNSIGNED: - credentials = None - elif ( - aws_access_key_id is not None and aws_secret_access_key is not None - ): - credentials = botocore.credentials.Credentials( - access_key=aws_access_key_id, - secret_key=aws_secret_access_key, - token=aws_session_token, - ) - elif self._missing_cred_vars(aws_access_key_id, aws_secret_access_key): - raise PartialCredentialsError( - provider='explicit', - cred_var=self._missing_cred_vars( - aws_access_key_id, aws_secret_access_key - ), - ) - else: - credentials = self.get_credentials() - auth_token = self.get_auth_token() - endpoint_resolver = self._get_internal_component('endpoint_resolver') - exceptions_factory = self._get_internal_component('exceptions_factory') - config_store = self.get_component('config_store') - defaults_mode = self._resolve_defaults_mode(config, config_store) - if defaults_mode != 'legacy': - smart_defaults_factory = self._get_internal_component( - 'smart_defaults_factory' - ) - config_store = copy.deepcopy(config_store) - smart_defaults_factory.merge_smart_defaults( - config_store, defaults_mode, region_name - ) - client_creator = botocore.client.ClientCreator( - loader, - endpoint_resolver, - self.user_agent(), - event_emitter, - retryhandler, - translate, - response_parser_factory, - exceptions_factory, - config_store, - ) - client = client_creator.create_client( - service_name=service_name, - region_name=region_name, - is_secure=use_ssl, - endpoint_url=endpoint_url, - verify=verify, - credentials=credentials, - scoped_config=self.get_scoped_config(), - client_config=config, - api_version=api_version, - auth_token=auth_token, - ) - monitor = self._get_internal_component('monitor') - if monitor is not None: - monitor.register(client.meta.events) - return client - - def _resolve_region_name(self, region_name, config): - # Figure out the user-provided region based on the various - # configuration options. - if region_name is None: - if config and config.region_name is not None: - region_name = config.region_name - else: - region_name = self.get_config_variable('region') - - validate_region_name(region_name) - # For any client that we create in retrieving credentials - # we want to create it using the same region as specified in - # creating this client. It is important to note though that the - # credentials client is only created once per session. So if a new - # client is created with a different region, its credential resolver - # will use the region of the first client. However, that is not an - # issue as of now because the credential resolver uses only STS and - # the credentials returned at regional endpoints are valid across - # all regions in the partition. 
- self._last_client_region_used = region_name - return region_name - - def _resolve_defaults_mode(self, client_config, config_store): - mode = config_store.get_config_variable('defaults_mode') - - if client_config and client_config.defaults_mode: - mode = client_config.defaults_mode - - default_config_resolver = self._get_internal_component( - 'default_config_resolver' - ) - default_modes = default_config_resolver.get_default_modes() - lmode = mode.lower() - if lmode not in default_modes: - raise InvalidDefaultsMode( - mode=mode, valid_modes=', '.join(default_modes) - ) - - return lmode - - def _missing_cred_vars(self, access_key, secret_key): - if access_key is not None and secret_key is None: - return 'aws_secret_access_key' - if secret_key is not None and access_key is None: - return 'aws_access_key_id' - return None - - def get_available_partitions(self): - """Lists the available partitions found on disk - - :rtype: list - :return: Returns a list of partition names (e.g., ["aws", "aws-cn"]) - """ - resolver = self._get_internal_component('endpoint_resolver') - return resolver.get_available_partitions() - - def get_partition_for_region(self, region_name): - """Returns the partition name of a particular region. - - :type region_name: string - :param region_name: Name of the region to find the partition for (e.g., - us-east-1). - - :rtype: string - :return: Returns the respective partition name (e.g., aws). - """ - resolver = self._get_internal_component('endpoint_resolver') - return resolver.get_partition_for_region(region_name) - - def get_available_regions( - self, service_name, partition_name='aws', allow_non_regional=False - ): - """Lists the region and endpoint names of a particular partition. - - :type service_name: string - :param service_name: Name of a service to list endpoints for (e.g., s3). - This parameter accepts a service name (e.g., "elb") or endpoint - prefix (e.g., "elasticloadbalancing"). - - :type partition_name: string - :param partition_name: Name of the partition to limit endpoints to - (e.g., aws for the public AWS endpoints, aws-cn for AWS China - endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.). - - :type allow_non_regional: bool - :param allow_non_regional: Set to True to include endpoints that are - not regional endpoints (e.g., s3-external-1, - fips-us-gov-west-1, etc). - :return: Returns a list of endpoint names (e.g., ["us-east-1"]). - """ - resolver = self._get_internal_component('endpoint_resolver') - results = [] - try: - service_data = self.get_service_data(service_name) - endpoint_prefix = service_data['metadata'].get( - 'endpointPrefix', service_name - ) - results = resolver.get_available_endpoints( - endpoint_prefix, partition_name, allow_non_regional - ) - except UnknownServiceError: - pass - return results - - -class ComponentLocator: - """Service locator for session components.""" - - def __init__(self): - self._components = {} - self._deferred = {} - - def get_component(self, name): - if name in self._deferred: - factory = self._deferred[name] - self._components[name] = factory() - # Only delete the component from the deferred dict after - # successfully creating the object from the factory as well as - # injecting the instantiated value into the _components dict. 
- del self._deferred[name] - try: - return self._components[name] - except KeyError: - raise ValueError("Unknown component: %s" % name) - - def register_component(self, name, component): - self._components[name] = component - try: - del self._deferred[name] - except KeyError: - pass - - def lazy_register_component(self, name, no_arg_factory): - self._deferred[name] = no_arg_factory - try: - del self._components[name] - except KeyError: - pass - - -class SessionVarDict(MutableMapping): - def __init__(self, session, session_vars): - self._session = session - self._store = copy.copy(session_vars) - - def __getitem__(self, key): - return self._store[key] - - def __setitem__(self, key, value): - self._store[key] = value - self._update_config_store_from_session_vars(key, value) - - def __delitem__(self, key): - del self._store[key] - - def __iter__(self): - return iter(self._store) - - def __len__(self): - return len(self._store) - - def _update_config_store_from_session_vars( - self, logical_name, config_options - ): - # This is for backwards compatibility. The new preferred way to - # modify configuration logic is to use the component system to get - # the config_store component from the session, and then update - # a key with custom config provider(s). - # This backwards compatibility method takes the old session_vars - # list of tuples and transforms that into a set of updates to - # the config_store component. - config_chain_builder = ConfigChainFactory(session=self._session) - config_name, env_vars, default, typecast = config_options - config_store = self._session.get_component('config_store') - config_store.set_config_provider( - logical_name, - config_chain_builder.create_config_chain( - instance_name=logical_name, - env_var_names=env_vars, - config_property_names=config_name, - default=default, - conversion_func=typecast, - ), - ) - - -class SubsetChainConfigFactory: - """A class for creating backwards compatible configuration chains. - - This class can be used instead of - :class:`botocore.configprovider.ConfigChainFactory` to make it honor the - methods argument to get_config_variable. This class can be used to filter - out providers that are not in the methods tuple when creating a new config - chain. - """ - - def __init__(self, session, methods, environ=None): - self._factory = ConfigChainFactory(session, environ) - self._supported_methods = methods - - def create_config_chain( - self, - instance_name=None, - env_var_names=None, - config_property_name=None, - default=None, - conversion_func=None, - ): - """Build a config chain following the standard botocore pattern. - - This config chain factory will omit any providers not in the methods - tuple provided at initialization. For example if given the tuple - ('instance', 'config',) it will not inject the environment provider - into the standard config chain. This lets the botocore session support - the custom ``methods`` argument for all the default botocore config - variables when calling ``get_config_variable``. - """ - if 'instance' not in self._supported_methods: - instance_name = None - if 'env' not in self._supported_methods: - env_var_names = None - if 'config' not in self._supported_methods: - config_property_name = None - return self._factory.create_config_chain( - instance_name=instance_name, - env_var_names=env_var_names, - config_property_names=config_property_name, - default=default, - conversion_func=conversion_func, - ) - - -def get_session(env_vars=None): - """ - Return a new session object. 
- """ - return Session(env_vars) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/manifest.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/manifest.py deleted file mode 100644 index ca0fe442d9ca499466df9438df16eca405c5f102..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/manifest.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2013 Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -""" -Class representing the list of files in a distribution. - -Equivalent to distutils.filelist, but fixes some problems. -""" -import fnmatch -import logging -import os -import re -import sys - -from . import DistlibException -from .compat import fsdecode -from .util import convert_path - - -__all__ = ['Manifest'] - -logger = logging.getLogger(__name__) - -# a \ followed by some spaces + EOL -_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M) -_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) - -# -# Due to the different results returned by fnmatch.translate, we need -# to do slightly different processing for Python 2.7 and 3.2 ... this needed -# to be brought in for Python 3.6 onwards. -# -_PYTHON_VERSION = sys.version_info[:2] - -class Manifest(object): - """A list of files built by on exploring the filesystem and filtered by - applying various patterns to what we find there. - """ - - def __init__(self, base=None): - """ - Initialise an instance. - - :param base: The base directory to explore under. - """ - self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) - self.prefix = self.base + os.sep - self.allfiles = None - self.files = set() - - # - # Public API - # - - def findall(self): - """Find all files under the base and set ``allfiles`` to the absolute - pathnames of files found. - """ - from stat import S_ISREG, S_ISDIR, S_ISLNK - - self.allfiles = allfiles = [] - root = self.base - stack = [root] - pop = stack.pop - push = stack.append - - while stack: - root = pop() - names = os.listdir(root) - - for name in names: - fullname = os.path.join(root, name) - - # Avoid excess stat calls -- just one will do, thank you! - stat = os.stat(fullname) - mode = stat.st_mode - if S_ISREG(mode): - allfiles.append(fsdecode(fullname)) - elif S_ISDIR(mode) and not S_ISLNK(mode): - push(fullname) - - def add(self, item): - """ - Add a file to the manifest. - - :param item: The pathname to add. This can be relative to the base. - """ - if not item.startswith(self.prefix): - item = os.path.join(self.base, item) - self.files.add(os.path.normpath(item)) - - def add_many(self, items): - """ - Add a list of files to the manifest. - - :param items: The pathnames to add. These can be relative to the base. - """ - for item in items: - self.add(item) - - def sorted(self, wantdirs=False): - """ - Return sorted files in directory order - """ - - def add_dir(dirs, d): - dirs.add(d) - logger.debug('add_dir added %s', d) - if d != self.base: - parent, _ = os.path.split(d) - assert parent not in ('', '/') - add_dir(dirs, parent) - - result = set(self.files) # make a copy! 
- if wantdirs: - dirs = set() - for f in result: - add_dir(dirs, os.path.dirname(f)) - result |= dirs - return [os.path.join(*path_tuple) for path_tuple in - sorted(os.path.split(path) for path in result)] - - def clear(self): - """Clear all collected files.""" - self.files = set() - self.allfiles = [] - - def process_directive(self, directive): - """ - Process a directive which either adds some files from ``allfiles`` to - ``files``, or removes some files from ``files``. - - :param directive: The directive to process. This should be in a format - compatible with distutils ``MANIFEST.in`` files: - - http://docs.python.org/distutils/sourcedist.html#commands - """ - # Parse the line: split it up, make sure the right number of words - # is there, and return the relevant words. 'action' is always - # defined: it's the first word of the line. Which of the other - # three are defined depends on the action; it'll be either - # patterns, (dir and patterns), or (dirpattern). - action, patterns, thedir, dirpattern = self._parse_directive(directive) - - # OK, now we know that the action is valid and we have the - # right number of words on the line for that action -- so we - # can proceed with minimal error-checking. - if action == 'include': - for pattern in patterns: - if not self._include_pattern(pattern, anchor=True): - logger.warning('no files found matching %r', pattern) - - elif action == 'exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=True) - #if not found: - # logger.warning('no previously-included files ' - # 'found matching %r', pattern) - - elif action == 'global-include': - for pattern in patterns: - if not self._include_pattern(pattern, anchor=False): - logger.warning('no files found matching %r ' - 'anywhere in distribution', pattern) - - elif action == 'global-exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=False) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found anywhere in ' - # 'distribution', pattern) - - elif action == 'recursive-include': - for pattern in patterns: - if not self._include_pattern(pattern, prefix=thedir): - logger.warning('no files found matching %r ' - 'under directory %r', pattern, thedir) - - elif action == 'recursive-exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, prefix=thedir) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found under directory %r', - # pattern, thedir) - - elif action == 'graft': - if not self._include_pattern(None, prefix=dirpattern): - logger.warning('no directories found matching %r', - dirpattern) - - elif action == 'prune': - if not self._exclude_pattern(None, prefix=dirpattern): - logger.warning('no previously-included directories found ' - 'matching %r', dirpattern) - else: # pragma: no cover - # This should never happen, as it should be caught in - # _parse_template_line - raise DistlibException( - 'invalid action %r' % action) - - # - # Private API - # - - def _parse_directive(self, directive): - """ - Validate a directive. - :param directive: The directive to validate. 
:return: A tuple of action, patterns, thedir, dir_patterns - """ - words = directive.split() - if len(words) == 1 and words[0] not in ('include', 'exclude', - 'global-include', - 'global-exclude', - 'recursive-include', - 'recursive-exclude', - 'graft', 'prune'): - # no action given, let's use the default 'include' - words.insert(0, 'include') - - action = words[0] - patterns = thedir = dir_pattern = None - - if action in ('include', 'exclude', - 'global-include', 'global-exclude'): - if len(words) < 2: - raise DistlibException( - '%r expects <pattern1> <pattern2> ...' % action) - - patterns = [convert_path(word) for word in words[1:]] - - elif action in ('recursive-include', 'recursive-exclude'): - if len(words) < 3: - raise DistlibException( - '%r expects
<dir> <pattern1> <pattern2> ...' % action) - - thedir = convert_path(words[1]) - patterns = [convert_path(word) for word in words[2:]] - - elif action in ('graft', 'prune'): - if len(words) != 2: - raise DistlibException( - '%r expects a single <dir_pattern>' % action) - - dir_pattern = convert_path(words[1]) - - else: - raise DistlibException('unknown action %r' % action) - - return action, patterns, thedir, dir_pattern - - def _include_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Select strings (presumably filenames) from 'self.allfiles' that - match 'pattern', a Unix-style wildcard (glob) pattern. - - Patterns are not quite the same as implemented by the 'fnmatch' - module: '*' and '?' match non-special characters, where "special" - is platform-dependent: slash on Unix; colon, slash, and backslash on - DOS/Windows; and colon on Mac OS. - - If 'anchor' is true (the default), then the pattern match is more - stringent: "*.py" will match "foo.py" but not "foo/bar.py". If - 'anchor' is false, both of these will match. - - If 'prefix' is supplied, then only filenames starting with 'prefix' - (itself a pattern) and ending with 'pattern', with anything in between - them, will match. 'anchor' is ignored in this case. - - If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and - 'pattern' is assumed to be either a string containing a regex or a - regex object -- no translation is done, the regex is just compiled - and used as-is. - - Selected strings will be added to self.files. - - Return True if files are found. - """ - # XXX docstring lying about what the special chars are? - found = False - pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) - - # delayed loading of allfiles list - if self.allfiles is None: - self.findall() - - for name in self.allfiles: - if pattern_re.search(name): - self.files.add(name) - found = True - return found - - def _exclude_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Remove strings (presumably filenames) from 'files' that match - 'pattern'. - - Other parameters are the same as for 'include_pattern()', above. - The list 'self.files' is modified in place. Return True if files are - found. - - This API is public to allow e.g. exclusion of SCM subdirs, e.g. when - packaging source distributions - """ - found = False - pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) - for f in list(self.files): - if pattern_re.search(f): - self.files.remove(f) - found = True - return found - - def _translate_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Translate a shell-like wildcard pattern to a compiled regular - expression. - - Return the compiled regex. If 'is_regex' is true, - then 'pattern' is directly compiled to a regex (if it's a string) - or just returned as-is (assumes it's a regex object). 
- """ - if is_regex: - if isinstance(pattern, str): - return re.compile(pattern) - else: - return pattern - - if _PYTHON_VERSION > (3, 2): - # ditch start and end characters - start, _, end = self._glob_to_re('_').partition('_') - - if pattern: - pattern_re = self._glob_to_re(pattern) - if _PYTHON_VERSION > (3, 2): - assert pattern_re.startswith(start) and pattern_re.endswith(end) - else: - pattern_re = '' - - base = re.escape(os.path.join(self.base, '')) - if prefix is not None: - # ditch end of pattern character - if _PYTHON_VERSION <= (3, 2): - empty_pattern = self._glob_to_re('') - prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] - else: - prefix_re = self._glob_to_re(prefix) - assert prefix_re.startswith(start) and prefix_re.endswith(end) - prefix_re = prefix_re[len(start): len(prefix_re) - len(end)] - sep = os.sep - if os.sep == '\\': - sep = r'\\' - if _PYTHON_VERSION <= (3, 2): - pattern_re = '^' + base + sep.join((prefix_re, - '.*' + pattern_re)) - else: - pattern_re = pattern_re[len(start): len(pattern_re) - len(end)] - pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep, - pattern_re, end) - else: # no prefix -- respect anchor flag - if anchor: - if _PYTHON_VERSION <= (3, 2): - pattern_re = '^' + base + pattern_re - else: - pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):]) - - return re.compile(pattern_re) - - def _glob_to_re(self, pattern): - """Translate a shell-like glob pattern to a regular expression. - - Return a string containing the regex. Differs from - 'fnmatch.translate()' in that '*' does not match "special characters" - (which are platform-specific). - """ - pattern_re = fnmatch.translate(pattern) - - # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which - # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, - # and by extension they shouldn't match such "special characters" under - # any OS. So change all non-escaped dots in the RE to match any - # character except the special characters (currently: just os.sep). - sep = os.sep - if os.sep == '\\': - # we're using a regex to manipulate a regex, so we need - # to escape the backslash twice - sep = r'\\\\' - escaped = r'\1[^%s]' % sep - pattern_re = re.sub(r'((? 
1 else cv2.INTER_AREA - img = cv2.resize(input_image, (W, H), interpolation=interpolation) - return img diff --git a/spaces/CikeyQI/Yunzai/Yunzai/renderers/puppeteer/index.js b/spaces/CikeyQI/Yunzai/Yunzai/renderers/puppeteer/index.js deleted file mode 100644 index 1684f12126ed474ceab8ceba5920159b9116ae23..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/Yunzai/Yunzai/renderers/puppeteer/index.js +++ /dev/null @@ -1,14 +0,0 @@ -import Puppeteer from './lib/puppeteer.js' - -/** - * - * @param config Contents of the local config.yaml - * @returns renderer The renderer object - * @returns renderer.id Renderer ID, matching the id selected in the renderer config - * @returns renderer.type Render type; reserved field, currently only image is supported - * @returns renderer.render Render entry point - */ -export default function (config) { - // TODO: Puppeteer needs to be simplified and refactored - return new Puppeteer(config) -} \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/module-a3cf0cc4.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/module-a3cf0cc4.js deleted file mode 100644 index f6ae7d751ba2fcbcc91f751a82c4280eb2369128..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/module-a3cf0cc4.js +++ /dev/null @@ -1,2 +0,0 @@ -const w=t=>n=>{const e=t(n);return n.add(e),e},N=t=>(n,e)=>(t.set(n,e),e),f=Number.MAX_SAFE_INTEGER===void 0?9007199254740991:Number.MAX_SAFE_INTEGER,g=536870912,_=g*2,O=(t,n)=>e=>{const r=n.get(e);let s=r===void 0?e.size:r<_?r+1:0;if(!e.has(s))return t(e,s);if(e.sizef)throw new Error("Congratulations, you created a collection of unique numbers which uses all available integers!");for(;e.has(s);)s=Math.floor(Math.random()*f);return t(e,s)},M=new WeakMap,m=N(M),h=O(m,M),I=w(h),R=t=>typeof t.start=="function",p=new WeakMap,A=t=>({...t,connect:({call:n})=>async()=>{const{port1:e,port2:r}=new MessageChannel,s=await n("connect",{port:e},[e]);return p.set(r,s),r},disconnect:({call:n})=>async e=>{const r=p.get(e);if(r===void 0)throw new Error("The given port is not connected.");await n("disconnect",{portId:r})},isSupported:({call:n})=>()=>n("isSupported")}),E=new WeakMap,b=t=>{if(E.has(t))return E.get(t);const n=new Map;return E.set(t,n),n},W=t=>{const n=A(t);return e=>{const r=b(e);e.addEventListener("message",({data:o})=>{const{id:a}=o;if(a!==null&&r.has(a)){const{reject:u,resolve:c}=r.get(a);r.delete(a),o.error===void 0?c(o.result):u(new Error(o.error.message))}}),R(e)&&e.start();const s=(o,a=null,u=[])=>new Promise((c,l)=>{const d=h(r);r.set(d,{reject:l,resolve:c}),a===null?e.postMessage({id:d,method:o},u):e.postMessage({id:d,method:o,params:a},u)}),T=(o,a,u=[])=>{e.postMessage({id:null,method:o,params:a},u)};let i={};for(const[o,a]of Object.entries(n))i={...i,[o]:a({call:s,notify:T})};return{...i}}};export{I as a,W as c,h as g}; -//# sourceMappingURL=module-a3cf0cc4.js.map diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/preprocessing/filter.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/preprocessing/filter.py deleted file mode 100644 index 6e95ec7c48be2ab545cbd7bab5b33c93ada38022..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/preprocessing/filter.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -@date: 2021/7/5 -@description: -""" -import json -import math -import shutil - -import numpy as np -from utils.boundary import * -import dataset -import os -from tqdm import tqdm -from PIL import Image -from visualization.boundary import * -from 
visualization.floorplan import * -from shapely.geometry import Polygon, Point - - -def filter_center(ceil_corners): - xyz = uv2xyz(ceil_corners, plan_y=1.6) - xz = xyz[:, ::2] - poly = Polygon(xz).buffer(-0.01) - return poly.contains(Point(0, 0)) - - -def filter_boundary(corners): - if is_ceil_boundary(corners): - return True - elif is_floor_boundary(corners): - return True - else: - # An intersection occurs, so the boundary is treated as invalid - return False - - -def filter_self_intersection(corners): - xz = uv2xyz(corners)[:, ::2] - poly = Polygon(xz) - return poly.is_valid - - -def filter_dataset(dataset, show=False, output_dir=None): - if output_dir is None: - output_dir = os.path.join(dataset.root_dir, dataset.mode) - output_img_dir = os.path.join(output_dir, 'img_align') - output_label_dir = os.path.join(output_dir, 'label_cor_align') - else: - output_dir = os.path.join(output_dir, dataset.mode) - output_img_dir = os.path.join(output_dir, 'img') - output_label_dir = os.path.join(output_dir, 'label_cor') - - if not os.path.exists(output_img_dir): - os.makedirs(output_img_dir) - - if not os.path.exists(output_label_dir): - os.makedirs(output_label_dir) - - bar = tqdm(dataset, total=len(dataset)) - for data in bar: - name = data['name'] - bar.set_description(f"Processing {name}") - img = data['img'] - corners = data['corners'] - - if not filter_center(corners[1::2]): - if show: - draw_boundaries(img, corners_list=[corners[0::2], corners[1::2]], show=True) - if not os.path.exists(data['img_path']): - print("already removed") - else: - print(f"move {name}") - shutil.move(data['img_path'], os.path.join(output_img_dir, os.path.basename(data['img_path']))) - shutil.move(data['label_path'], os.path.join(output_label_dir, os.path.basename(data['label_path']))) - - -def execute_filter_dataset(root_dir, dataset_name="PanoS2D3DDataset", modes=None, output_dir=None): - if modes is None: - modes = ["train", "test", "valid"] - - for mode in modes: - print("mode: {}".format(mode)) - - filter_dataset(getattr(dataset, dataset_name)(root_dir, mode), show=False, output_dir=output_dir) - - -if __name__ == '__main__': - execute_filter_dataset(root_dir='/root/data/hd/hnet_dataset', - dataset_name="PanoS2D3DDataset", modes=['train', "test", "valid"], - output_dir='/root/data/hd/hnet_dataset_close') diff --git a/spaces/Dauzy/whisper-webui/docs/colab.md b/spaces/Dauzy/whisper-webui/docs/colab.md deleted file mode 100644 index 3fcdb835327238764fb643b9bbd2e27b6e14f58c..0000000000000000000000000000000000000000 --- a/spaces/Dauzy/whisper-webui/docs/colab.md +++ /dev/null @@ -1,20 +0,0 @@ -# Running Whisper on Google Colab - -If you don't have a decent GPU or any experience in running command-line applications, you might want to try this Google Colab instead: - -* [Google Colab - Whisper WebUI GPU](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing) -* [Screenshots](https://imgur.com/a/ZfY6uBO) - -The runtime (Runtime -> Change runtime type -> Hardware accelerator) should already be set to GPU. But if not, change it to GPU. - -Then, sign in to Google if you haven't already. Next, click on "Connect" at the top right. - -Under "Checking out WebUI from Git", click on the [play icon](https://imgur.com/a/81gOLyD) that appears in "[ ]" at the left. If you get a warning, click "Run anyway". - -After this step has completed, it should get a green check mark. Then move on to the next section under "Installing dependencies", and click on "[ ]" again. This might take approximately 30 seconds. 
- -Once this has completed, scroll down to the "Run WebUI" section, and click on "[ ]". This will launch the WebUI in a shared link (expires in 72 hours). To open the UI, click on the link next to "Running on public URL", which will be something like https://12xxx.gradio.app/ - -The audio length in this version is not restricted, and it will run much faster as it is backed by a GPU. You can also run it using the "Large" model. Also note that it might take some time to start the model the first time, as it may need to download a 2.8 GB file on Google's servers. - -Once you're done, you can close the WebUI session by clicking the animated close button under "Run WebUI". You can also do this if you encounter any errors and need to restart the UI. You should also go to "Manage Sessions" and terminate the session, otherwise you may end up using all your free compute credits. \ No newline at end of file diff --git a/spaces/DianXian/Real-CUGAN/README.md b/spaces/DianXian/Real-CUGAN/README.md deleted file mode 100644 index fabd7e43bcbbd3717afbcde561ba4ab307b30c74..0000000000000000000000000000000000000000 --- a/spaces/DianXian/Real-CUGAN/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Real CUGAN -emoji: 🐢 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/tflib/optimizer.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/tflib/optimizer.py deleted file mode 100644 index 6ed88cb236365234597f8734299fbb315c56cc73..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/tflib/optimizer.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Helper wrapper for a Tensorflow optimizer.""" - -import numpy as np -import tensorflow as tf - -from collections import OrderedDict -from typing import List, Union - -from . import autosummary -from . import tfutil -from .. import util - -from .tfutil import TfExpression, TfExpressionEx - -try: - # TensorFlow 1.13 - from tensorflow.python.ops import nccl_ops -except: - # Older TensorFlow versions - import tensorflow.contrib.nccl as nccl_ops - -class Optimizer: - """A Wrapper for tf.train.Optimizer. - - Automatically takes care of: - - Gradient averaging for multi-GPU training. - - Dynamic loss scaling and typecasts for FP16 training. - - Ignoring corrupted gradients that contain NaNs/Infs. - - Reporting statistics. - - Well-chosen default settings. - """ - - def __init__(self, - name: str = "Train", - tf_optimizer: str = "tf.train.AdamOptimizer", - learning_rate: TfExpressionEx = 0.001, - use_loss_scaling: bool = False, - loss_scaling_init: float = 64.0, - loss_scaling_inc: float = 0.0005, - loss_scaling_dec: float = 1.0, - **kwargs): - - # Init fields. 
- self.name = name - self.learning_rate = tf.convert_to_tensor(learning_rate) - self.id = self.name.replace("/", ".") - self.scope = tf.get_default_graph().unique_name(self.id) - self.optimizer_class = util.get_obj_by_name(tf_optimizer) - self.optimizer_kwargs = dict(kwargs) - self.use_loss_scaling = use_loss_scaling - self.loss_scaling_init = loss_scaling_init - self.loss_scaling_inc = loss_scaling_inc - self.loss_scaling_dec = loss_scaling_dec - self._grad_shapes = None # [shape, ...] - self._dev_opt = OrderedDict() # device => optimizer - self._dev_grads = OrderedDict() # device => [[(grad, var), ...], ...] - self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor) - self._updates_applied = False - - def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None: - """Register the gradients of the given loss function with respect to the given variables. - Intended to be called once per GPU.""" - assert not self._updates_applied - - # Validate arguments. - if isinstance(trainable_vars, dict): - trainable_vars = list(trainable_vars.values()) # allow passing in Network.trainables as vars - - assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1 - assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss]) - - if self._grad_shapes is None: - self._grad_shapes = [tfutil.shape_to_list(var.shape) for var in trainable_vars] - - assert len(trainable_vars) == len(self._grad_shapes) - assert all(tfutil.shape_to_list(var.shape) == var_shape for var, var_shape in zip(trainable_vars, self._grad_shapes)) - - dev = loss.device - - assert all(var.device == dev for var in trainable_vars) - - # Register device and compute gradients. - with tf.name_scope(self.id + "_grad"), tf.device(dev): - if dev not in self._dev_opt: - opt_name = self.scope.replace("/", "_") + "_opt%d" % len(self._dev_opt) - assert callable(self.optimizer_class) - self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs) - self._dev_grads[dev] = [] - - loss = self.apply_loss_scaling(tf.cast(loss, tf.float32)) - grads = self._dev_opt[dev].compute_gradients(loss, trainable_vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage - grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros - self._dev_grads[dev].append(grads) - - def apply_updates(self) -> tf.Operation: - """Construct training op to update the registered variables based on their gradients.""" - tfutil.assert_tf_initialized() - assert not self._updates_applied - self._updates_applied = True - devices = list(self._dev_grads.keys()) - total_grads = sum(len(grads) for grads in self._dev_grads.values()) - assert len(devices) >= 1 and total_grads >= 1 - ops = [] - - with tfutil.absolute_name_scope(self.scope): - # Cast gradients to FP32 and calculate partial sum within each device. - dev_grads = OrderedDict() # device => [(grad, var), ...] - - for dev_idx, dev in enumerate(devices): - with tf.name_scope("ProcessGrads%d" % dev_idx), tf.device(dev): - sums = [] - - for gv in zip(*self._dev_grads[dev]): - assert all(v is gv[0][1] for g, v in gv) - g = [tf.cast(g, tf.float32) for g, v in gv] - g = g[0] if len(g) == 1 else tf.add_n(g) - sums.append((g, gv[0][1])) - - dev_grads[dev] = sums - - # Sum gradients across devices. 
- if len(devices) > 1: - with tf.name_scope("SumAcrossGPUs"), tf.device(None): - for var_idx, grad_shape in enumerate(self._grad_shapes): - g = [dev_grads[dev][var_idx][0] for dev in devices] - - if np.prod(grad_shape): # nccl does not support zero-sized tensors - g = nccl_ops.all_sum(g) - - for dev, gg in zip(devices, g): - dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1]) - - # Apply updates separately on each device. - for dev_idx, (dev, grads) in enumerate(dev_grads.items()): - with tf.name_scope("ApplyGrads%d" % dev_idx), tf.device(dev): - # Scale gradients as needed. - if self.use_loss_scaling or total_grads > 1: - with tf.name_scope("Scale"): - coef = tf.constant(np.float32(1.0 / total_grads), name="coef") - coef = self.undo_loss_scaling(coef) - grads = [(g * coef, v) for g, v in grads] - - # Check for overflows. - with tf.name_scope("CheckOverflow"): - grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads])) - - # Update weights and adjust loss scaling. - with tf.name_scope("UpdateWeights"): - # pylint: disable=cell-var-from-loop - opt = self._dev_opt[dev] - ls_var = self.get_loss_scaling_var(dev) - - if not self.use_loss_scaling: - ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op)) - else: - ops.append(tf.cond(grad_ok, - lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)), - lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec)))) - - # Report statistics on the last device. - if dev == devices[-1]: - with tf.name_scope("Statistics"): - ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate)) - ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(grad_ok, 0, 1))) - - if self.use_loss_scaling: - ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", ls_var)) - - # Initialize variables and group everything into a single op. 
- self.reset_optimizer_state() - tfutil.init_uninitialized_vars(list(self._dev_ls_var.values())) - - return tf.group(*ops, name="TrainingOp") - - def reset_optimizer_state(self) -> None: - """Reset internal state of the underlying optimizer.""" - tfutil.assert_tf_initialized() - tfutil.run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()]) - - def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]: - """Get or create variable representing log2 of the current dynamic loss scaling factor.""" - if not self.use_loss_scaling: - return None - - if device not in self._dev_ls_var: - with tfutil.absolute_name_scope(self.scope + "/LossScalingVars"), tf.control_dependencies(None): - self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name="loss_scaling_var") - - return self._dev_ls_var[device] - - def apply_loss_scaling(self, value: TfExpression) -> TfExpression: - """Apply dynamic loss scaling for the given expression.""" - assert tfutil.is_tf_expression(value) - - if not self.use_loss_scaling: - return value - - return value * tfutil.exp2(self.get_loss_scaling_var(value.device)) - - def undo_loss_scaling(self, value: TfExpression) -> TfExpression: - """Undo the effect of dynamic loss scaling for the given expression.""" - assert tfutil.is_tf_expression(value) - - if not self.use_loss_scaling: - return value - - return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type diff --git a/spaces/Ekimetrics/climate-question-answering/climateqa/chat.py b/spaces/Ekimetrics/climate-question-answering/climateqa/chat.py deleted file mode 100644 index 5b11726b54fe6004947027fc290cda46ed5d619a..0000000000000000000000000000000000000000 --- a/spaces/Ekimetrics/climate-question-answering/climateqa/chat.py +++ /dev/null @@ -1,39 +0,0 @@ -# LANGCHAIN IMPORTS -from langchain import PromptTemplate, LLMChain -from langchain.embeddings import HuggingFaceEmbeddings -from langchain.chains import RetrievalQAWithSourcesChain -from langchain.chains.qa_with_sources import load_qa_with_sources_chain - - -# CLIMATEQA -from climateqa.retriever import ClimateQARetriever -from climateqa.vectorstore import get_pinecone_vectorstore -from climateqa.chains import load_climateqa_chain - - -class ClimateQA: - def __init__(self,hf_embedding_model = "sentence-transformers/multi-qa-mpnet-base-dot-v1", - show_progress_bar = False,batch_size = 1,max_tokens = 1024,**kwargs): - - self.llm = self.get_llm(max_tokens = max_tokens,**kwargs) - self.embeddings_function = HuggingFaceEmbeddings( - model_name=hf_embedding_model, - encode_kwargs={"show_progress_bar":show_progress_bar,"batch_size":batch_size} - ) - - - - def get_vectorstore(self): - pass - - - def reformulate(self): - pass - - - def retrieve(self): - pass - - - def ask(self): - pass \ No newline at end of file diff --git a/spaces/EleutherAI/VQGAN_CLIP/taming-transformers/taming/data/faceshq.py b/spaces/EleutherAI/VQGAN_CLIP/taming-transformers/taming/data/faceshq.py deleted file mode 100644 index 6912d04b66a6d464c1078e4b51d5da290f5e767e..0000000000000000000000000000000000000000 --- a/spaces/EleutherAI/VQGAN_CLIP/taming-transformers/taming/data/faceshq.py +++ /dev/null @@ -1,134 +0,0 @@ -import os -import numpy as np -import albumentations -from torch.utils.data import Dataset - -from taming.data.base import ImagePaths, NumpyPaths, ConcatDatasetWithIndex - - -class FacesBase(Dataset): - def __init__(self, *args, **kwargs): - super().__init__() - self.data = None - 
self.keys = None - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - example = self.data[i] - ex = {} - if self.keys is not None: - for k in self.keys: - ex[k] = example[k] - else: - ex = example - return ex - - -class CelebAHQTrain(FacesBase): - def __init__(self, size, keys=None): - super().__init__() - root = "data/celebahq" - with open("data/celebahqtrain.txt", "r") as f: - relpaths = f.read().splitlines() - paths = [os.path.join(root, relpath) for relpath in relpaths] - self.data = NumpyPaths(paths=paths, size=size, random_crop=False) - self.keys = keys - - -class CelebAHQValidation(FacesBase): - def __init__(self, size, keys=None): - super().__init__() - root = "data/celebahq" - with open("data/celebahqvalidation.txt", "r") as f: - relpaths = f.read().splitlines() - paths = [os.path.join(root, relpath) for relpath in relpaths] - self.data = NumpyPaths(paths=paths, size=size, random_crop=False) - self.keys = keys - - -class FFHQTrain(FacesBase): - def __init__(self, size, keys=None): - super().__init__() - root = "data/ffhq" - with open("data/ffhqtrain.txt", "r") as f: - relpaths = f.read().splitlines() - paths = [os.path.join(root, relpath) for relpath in relpaths] - self.data = ImagePaths(paths=paths, size=size, random_crop=False) - self.keys = keys - - -class FFHQValidation(FacesBase): - def __init__(self, size, keys=None): - super().__init__() - root = "data/ffhq" - with open("data/ffhqvalidation.txt", "r") as f: - relpaths = f.read().splitlines() - paths = [os.path.join(root, relpath) for relpath in relpaths] - self.data = ImagePaths(paths=paths, size=size, random_crop=False) - self.keys = keys - - -class FacesHQTrain(Dataset): - # CelebAHQ [0] + FFHQ [1] - def __init__(self, size, keys=None, crop_size=None, coord=False): - d1 = CelebAHQTrain(size=size, keys=keys) - d2 = FFHQTrain(size=size, keys=keys) - self.data = ConcatDatasetWithIndex([d1, d2]) - self.coord = coord - if crop_size is not None: - self.cropper = albumentations.RandomCrop(height=crop_size,width=crop_size) - if self.coord: - self.cropper = albumentations.Compose([self.cropper], - additional_targets={"coord": "image"}) - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - ex, y = self.data[i] - if hasattr(self, "cropper"): - if not self.coord: - out = self.cropper(image=ex["image"]) - ex["image"] = out["image"] - else: - h,w,_ = ex["image"].shape - coord = np.arange(h*w).reshape(h,w,1)/(h*w) - out = self.cropper(image=ex["image"], coord=coord) - ex["image"] = out["image"] - ex["coord"] = out["coord"] - ex["class"] = y - return ex - - -class FacesHQValidation(Dataset): - # CelebAHQ [0] + FFHQ [1] - def __init__(self, size, keys=None, crop_size=None, coord=False): - d1 = CelebAHQValidation(size=size, keys=keys) - d2 = FFHQValidation(size=size, keys=keys) - self.data = ConcatDatasetWithIndex([d1, d2]) - self.coord = coord - if crop_size is not None: - self.cropper = albumentations.CenterCrop(height=crop_size,width=crop_size) - if self.coord: - self.cropper = albumentations.Compose([self.cropper], - additional_targets={"coord": "image"}) - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - ex, y = self.data[i] - if hasattr(self, "cropper"): - if not self.coord: - out = self.cropper(image=ex["image"]) - ex["image"] = out["image"] - else: - h,w,_ = ex["image"].shape - coord = np.arange(h*w).reshape(h,w,1)/(h*w) - out = self.cropper(image=ex["image"], coord=coord) - ex["image"] = out["image"] - ex["coord"] = out["coord"] - ex["class"] = y - 
return ex diff --git a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/utils.py b/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/utils.py deleted file mode 100644 index f4805cdb25e7c50611412a19340ad525d1251d7b..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/utils.py +++ /dev/null @@ -1,121 +0,0 @@ -import json - -import numpy as np -import torch -from tqdm import tqdm - - -def load_data(file_name: str = "./infer/lib/uvr5_pack/name_params.json") -> dict: - with open(file_name, "r") as f: - data = json.load(f) - - return data - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def inference(X_spec, device, model, aggressiveness, data): - """ - data : dic configs - """ - - def _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True - ): - model.eval() - with torch.no_grad(): - preds = [] - - iterations = [n_window] - - total_iterations = sum(iterations) - for i in tqdm(range(n_window)): - start = i * roi_size - X_mag_window = X_mag_pad[ - None, :, :, start : start + data["window_size"] - ] - X_mag_window = torch.from_numpy(X_mag_window) - if is_half: - X_mag_window = X_mag_window.half() - X_mag_window = X_mag_window.to(device) - - pred = model.predict(X_mag_window, aggressiveness) - - pred = pred.detach().cpu().numpy() - preds.append(pred[0]) - - pred = np.concatenate(preds, axis=2) - return pred - - def preprocess(X_spec): - X_mag = np.abs(X_spec) - X_phase = np.angle(X_spec) - - return X_mag, X_phase - - X_mag, X_phase = preprocess(X_spec) - - coef = X_mag.max() - X_mag_pre = X_mag / coef - - n_frame = X_mag_pre.shape[2] - pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset) - n_window = int(np.ceil(n_frame / roi_size)) - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - if list(model.state_dict().values())[0].dtype == torch.float16: - is_half = True - else: - is_half = False - pred = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred = pred[:, :, :n_frame] - - if data["tta"]: - pad_l += roi_size // 2 - pad_r += roi_size // 2 - n_window += 1 - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - pred_tta = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred_tta = pred_tta[:, :, roi_size // 2 :] - pred_tta = pred_tta[:, :, :n_frame] - - return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase) - else: - return pred * coef, X_mag, np.exp(1.0j * X_phase) - - -def _get_name_params(model_path, model_hash): - data = load_data() - flag = False - ModelName = model_path - for type in list(data): - for model in list(data[type][0]): - for i in range(len(data[type][0][model])): - if str(data[type][0][model][i]["hash_name"]) == model_hash: - flag = True - elif str(data[type][0][model][i]["hash_name"]) in ModelName: - flag = True - - if flag: - model_params_auto = data[type][0][model][i]["model_params"] - param_name_auto = data[type][0][model][i]["param_name"] - if type == "equivalent": - return param_name_auto, model_params_auto - else: - flag = False - return param_name_auto, model_params_auto diff --git a/spaces/EuroPython2022/Face-Mask-Detection-with-YOLOS/README.md b/spaces/EuroPython2022/Face-Mask-Detection-with-YOLOS/README.md deleted file mode 100644 index 
7dba991fd5ba74d4114495a2092f8842db129bae..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/Face-Mask-Detection-with-YOLOS/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Face Mask Detection With YOLOS -emoji: 😷 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.0.24 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textdet/psenet/README.md b/spaces/EuroPython2022/mmocr-demo/configs/textdet/psenet/README.md deleted file mode 100644 index b4293a3ce823c5dd285fda86dbc47b41465129b3..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/textdet/psenet/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# PSENet - -> [Shape robust text detection with progressive scale expansion network](https://arxiv.org/abs/1903.12473) - - - -## Abstract - -Scene text detection has witnessed rapid progress especially with the recent development of convolutional neural networks. However, there still exists two challenges which prevent the algorithm into industry applications. On the one hand, most of the state-of-art algorithms require quadrangle bounding box which is in-accurate to locate the texts with arbitrary shape. On the other hand, two text instances which are close to each other may lead to a false detection which covers both instances. Traditionally, the segmentation-based approach can relieve the first problem but usually fail to solve the second challenge. To address these two challenges, in this paper, we propose a novel Progressive Scale Expansion Network (PSENet), which can precisely detect text instances with arbitrary shapes. More specifically, PSENet generates the different scale of kernels for each text instance, and gradually expands the minimal scale kernel to the text instance with the complete shape. Due to the fact that there are large geometrical margins among the minimal scale kernels, our method is effective to split the close text instances, making it easier to use segmentation-based methods to detect arbitrary-shaped text instances. Extensive experiments on CTW1500, Total-Text, ICDAR 2015 and ICDAR 2017 MLT validate the effectiveness of PSENet. Notably, on CTW1500, a dataset full of long curve texts, PSENet achieves a F-measure of 74.3% at 27 FPS, and our best F-measure (82.2%) outperforms state-of-art algorithms by 6.6%. The code will be released in the future. - -
- -
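The progressive scale expansion step described in the abstract is easy to sketch: connected components of the smallest kernel map seed the text instances, which then grow breadth-first into each successively larger kernel map, with first-come-first-served conflict resolution keeping close instances apart. The following is an illustrative sketch only, not the paper's released code — `progressive_scale_expansion` and its variable names are ours, and `kernels` is assumed to be a list of already-binarized numpy arrays ordered smallest to largest:

```python
from collections import deque

import numpy as np
from scipy import ndimage


def progressive_scale_expansion(kernels):
    # Seed instances from connected components of the smallest kernel.
    labels, _ = ndimage.label(kernels[0])
    for kernel in kernels[1:]:
        # Grow every labeled pixel breadth-first into the next kernel's
        # foreground; a pixel keeps the first label that reaches it.
        queue = deque(zip(*np.nonzero(labels)))
        while queue:
            y, x = queue.popleft()
            for dy, dx in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                ny, nx = y + dy, x + dx
                if (0 <= ny < labels.shape[0] and 0 <= nx < labels.shape[1]
                        and kernel[ny, nx] and labels[ny, nx] == 0):
                    labels[ny, nx] = labels[y, x]
                    queue.append((ny, nx))
    return labels  # H x W map of text-instance ids (0 = background)
```

Feeding this the binarized kernel outputs S1…Sn reproduces the kernel-expansion step at inference time.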
- -## Results and models - -### CTW1500 - -| Method | Backbone | Extra Data | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :------------------------------------------------: | :------: | :--------: | :-----------: | :----------: | :-----: | :-------: | :-----------: | :-----------: | :-----------: | :--------------------------------------------------: | -| [PSENet-4s](configs/textdet/psenet/psenet_r50_fpnf_600e_ctw1500.py) | ResNet50 | - | CTW1500 Train | CTW1500 Test | 600 | 1280 | 0.728 (0.717) | 0.849 (0.852) | 0.784 (0.779) | [model](https://download.openmmlab.com/mmocr/textdet/psenet/psenet_r50_fpnf_600e_ctw1500_20210401-216fed50.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/psenet/20210401_215421.log.json) | - -### ICDAR2015 - -| Method | Backbone | Extra Data | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :----------------------------------: | :------: | :---------------------------------------: | :----------: | :-------: | :-----: | :-------: | :-----------: | :-----------: | :-----------: | :-------------------------------------: | -| [PSENet-4s](configs/textdet/psenet/psenet_r50_fpnf_600e_icdar2015.py) | ResNet50 | - | IC15 Train | IC15 Test | 600 | 2240 | 0.784 (0.753) | 0.831 (0.867) | 0.807 (0.806) | [model](https://download.openmmlab.com/mmocr/textdet/psenet/psenet_r50_fpnf_600e_icdar2015-c6131f0d.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/psenet/20210331_214145.log.json) | -| [PSENet-4s](configs/textdet/psenet/psenet_r50_fpnf_600e_icdar2015.py) | ResNet50 | pretrain on IC17 MLT [model](https://download.openmmlab.com/mmocr/textdet/psenet/psenet_r50_fpnf_600e_icdar2017_as_pretrain-3bd6056c.pth) | IC15 Train | IC15 Test | 600 | 2240 | 0.834 | 0.861 | 0.847 | [model](https://download.openmmlab.com/mmocr/textdet/psenet/psenet_r50_fpnf_600e_icdar2015_pretrain-eefd8fe6.pth) \| [log](<>) | - -```{note} -We've upgraded our IoU backend from `Polygon3` to `shapely`. There are some performance differences for some models due to the backends' different logics to handle invalid polygons (more info [here](https://github.com/open-mmlab/mmocr/issues/465)). **New evaluation result is presented in brackets** and new logs will be uploaded soon. 
-``` - -## Citation - -```bibtex -@inproceedings{wang2019shape, - title={Shape robust text detection with progressive scale expansion network}, - author={Wang, Wenhai and Xie, Enze and Li, Xiang and Hou, Wenbo and Lu, Tong and Yu, Gang and Shao, Shuai}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={9336--9345}, - year={2019} -} -``` diff --git a/spaces/EveryPizza/Cartoony-Gradio-Theme/theme_dropdown.py b/spaces/EveryPizza/Cartoony-Gradio-Theme/theme_dropdown.py deleted file mode 100644 index 6235388fd00549553df44028f3ccf03e946994ea..0000000000000000000000000000000000000000 --- a/spaces/EveryPizza/Cartoony-Gradio-Theme/theme_dropdown.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import pathlib - -from gradio.themes.utils import ThemeAsset - - -def create_theme_dropdown(): - import gradio as gr - - asset_path = pathlib.Path(__file__).parent / "themes" - themes = [] - for theme_asset in os.listdir(str(asset_path)): - themes.append( - (ThemeAsset(theme_asset), gr.Theme.load(str(asset_path / theme_asset))) - ) - - def make_else_if(theme_asset): - return f""" - else if (theme == '{str(theme_asset[0].version)}') {{ - var theme_css = `{theme_asset[1]._get_theme_css()}` - }}""" - - head, tail = themes[0], themes[1:] - if_statement = f""" - if (theme == "{str(head[0].version)}") {{ - var theme_css = `{head[1]._get_theme_css()}` - }} {" ".join(make_else_if(t) for t in tail)} - """ - - latest_to_oldest = sorted([t[0] for t in themes], key=lambda asset: asset.version)[ - ::-1 - ] - latest_to_oldest = [str(t.version) for t in latest_to_oldest] - - component = gr.Dropdown( - choices=latest_to_oldest, - value=latest_to_oldest[0], - render=False, - label="Select Version", - ).style(container=False) - - return ( - component, - f""" - (theme) => {{ - if (!document.querySelector('.theme-css')) {{ - var theme_elem = document.createElement('style'); - theme_elem.classList.add('theme-css'); - document.head.appendChild(theme_elem); - }} else {{ - var theme_elem = document.querySelector('.theme-css'); - }} - {if_statement} - theme_elem.innerHTML = theme_css; - }} - """, - ) diff --git a/spaces/FlippFuzz/whisper-webui/cli.py b/spaces/FlippFuzz/whisper-webui/cli.py deleted file mode 100644 index 4db79d28d634a71051d99b0ee3e847c28333f3bb..0000000000000000000000000000000000000000 --- a/spaces/FlippFuzz/whisper-webui/cli.py +++ /dev/null @@ -1,169 +0,0 @@ -import argparse -import os -import pathlib -from urllib.parse import urlparse -import warnings -import numpy as np - -import torch -from app import WhisperTranscriber -from src.config import ApplicationConfig -from src.download import download_url -from src.languages import get_language_names - -from src.utils import optional_float, optional_int, str2bool -from src.whisper.whisperFactory import create_whisper_container - -def cli(): - app_config = ApplicationConfig.create_default() - whisper_models = app_config.get_model_names() - - # For the CLI, we fallback to saving the output to the current directory - output_dir = app_config.output_dir if app_config.output_dir is not None else "." 
- - # Environment variable overrides - default_whisper_implementation = os.environ.get("WHISPER_IMPLEMENTATION", app_config.whisper_implementation) - - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("audio", nargs="+", type=str, \ - help="audio file(s) to transcribe") - parser.add_argument("--model", default=app_config.default_model_name, choices=whisper_models, \ - help="name of the Whisper model to use") # medium - parser.add_argument("--model_dir", type=str, default=app_config.model_dir, \ - help="the path to save model files; uses ~/.cache/whisper by default") - parser.add_argument("--device", default=app_config.device, \ - help="device to use for PyTorch inference") - parser.add_argument("--output_dir", "-o", type=str, default=output_dir, \ - help="directory to save the outputs") - parser.add_argument("--verbose", type=str2bool, default=app_config.verbose, \ - help="whether to print out the progress and debug messages") - parser.add_argument("--whisper_implementation", type=str, default=default_whisper_implementation, choices=["whisper", "faster-whisper"],\ - help="the Whisper implementation to use") - - parser.add_argument("--task", type=str, default=app_config.task, choices=["transcribe", "translate"], \ - help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')") - parser.add_argument("--language", type=str, default=app_config.language, choices=sorted(get_language_names()), \ - help="language spoken in the audio, specify None to perform language detection") - - parser.add_argument("--vad", type=str, default=app_config.default_vad, choices=["none", "silero-vad", "silero-vad-skip-gaps", "silero-vad-expand-into-gaps", "periodic-vad"], \ - help="The voice activity detection algorithm to use") # silero-vad - parser.add_argument("--vad_merge_window", type=optional_float, default=app_config.vad_merge_window, \ - help="The window size (in seconds) to merge voice segments") - parser.add_argument("--vad_max_merge_size", type=optional_float, default=app_config.vad_max_merge_size,\ - help="The maximum size (in seconds) of a voice segment") - parser.add_argument("--vad_padding", type=optional_float, default=app_config.vad_padding, \ - help="The padding (in seconds) to add to each voice segment") - parser.add_argument("--vad_prompt_window", type=optional_float, default=app_config.vad_prompt_window, \ - help="The window size of the prompt to pass to Whisper") - parser.add_argument("--vad_cpu_cores", type=int, default=app_config.vad_cpu_cores, \ - help="The number of CPU cores to use for VAD pre-processing.") # 1 - parser.add_argument("--vad_parallel_devices", type=str, default=app_config.vad_parallel_devices, \ - help="A commma delimited list of CUDA devices to use for parallel processing. If None, disable parallel processing.") # "" - parser.add_argument("--auto_parallel", type=bool, default=app_config.auto_parallel, \ - help="True to use all available GPUs and CPU cores for processing. 
Use vad_cpu_cores/vad_parallel_devices to specify the number of CPU cores/GPUs to use.") # False - - parser.add_argument("--temperature", type=float, default=app_config.temperature, \ - help="temperature to use for sampling") - parser.add_argument("--best_of", type=optional_int, default=app_config.best_of, \ - help="number of candidates when sampling with non-zero temperature") - parser.add_argument("--beam_size", type=optional_int, default=app_config.beam_size, \ - help="number of beams in beam search, only applicable when temperature is zero") - parser.add_argument("--patience", type=float, default=app_config.patience, \ - help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search") - parser.add_argument("--length_penalty", type=float, default=app_config.length_penalty, \ - help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple lengt normalization by default") - - parser.add_argument("--suppress_tokens", type=str, default=app_config.suppress_tokens, \ - help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations") - parser.add_argument("--initial_prompt", type=str, default=app_config.initial_prompt, \ - help="optional text to provide as a prompt for the first window.") - parser.add_argument("--condition_on_previous_text", type=str2bool, default=app_config.condition_on_previous_text, \ - help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop") - parser.add_argument("--fp16", type=str2bool, default=app_config.fp16, \ - help="whether to perform inference in fp16; True by default") - parser.add_argument("--compute_type", type=str, default=app_config.compute_type, choices=["default", "auto", "int8", "int8_float16", "int16", "float16", "float32"], \ - help="the compute type to use for inference") - - parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=app_config.temperature_increment_on_fallback, \ - help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below") - parser.add_argument("--compression_ratio_threshold", type=optional_float, default=app_config.compression_ratio_threshold, \ - help="if the gzip compression ratio is higher than this value, treat the decoding as failed") - parser.add_argument("--logprob_threshold", type=optional_float, default=app_config.logprob_threshold, \ - help="if the average log probability is lower than this value, treat the decoding as failed") - parser.add_argument("--no_speech_threshold", type=optional_float, default=app_config.no_speech_threshold, \ - help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence") - - args = parser.parse_args().__dict__ - model_name: str = args.pop("model") - model_dir: str = args.pop("model_dir") - output_dir: str = args.pop("output_dir") - device: str = args.pop("device") - os.makedirs(output_dir, exist_ok=True) - - whisper_implementation = args.pop("whisper_implementation") - print(f"Using {whisper_implementation} for Whisper") - - if model_name.endswith(".en") and args["language"] not in {"en", "English"}: - warnings.warn(f"{model_name} is an English-only 
model but receipted '{args['language']}'; using English instead.") - args["language"] = "en" - - temperature = args.pop("temperature") - temperature_increment_on_fallback = args.pop("temperature_increment_on_fallback") - if temperature_increment_on_fallback is not None: - temperature = tuple(np.arange(temperature, 1.0 + 1e-6, temperature_increment_on_fallback)) - else: - temperature = [temperature] - - vad = args.pop("vad") - vad_merge_window = args.pop("vad_merge_window") - vad_max_merge_size = args.pop("vad_max_merge_size") - vad_padding = args.pop("vad_padding") - vad_prompt_window = args.pop("vad_prompt_window") - vad_cpu_cores = args.pop("vad_cpu_cores") - auto_parallel = args.pop("auto_parallel") - - compute_type = args.pop("compute_type") - - transcriber = WhisperTranscriber(delete_uploaded_files=False, vad_cpu_cores=vad_cpu_cores, app_config=app_config) - transcriber.set_parallel_devices(args.pop("vad_parallel_devices")) - transcriber.set_auto_parallel(auto_parallel) - - model = create_whisper_container(whisper_implementation=whisper_implementation, model_name=model_name, - device=device, compute_type=compute_type, download_root=model_dir, models=app_config.models) - - if (transcriber._has_parallel_devices()): - print("Using parallel devices:", transcriber.parallel_device_list) - - for audio_path in args.pop("audio"): - sources = [] - - # Detect URL and download the audio - if (uri_validator(audio_path)): - # Download from YouTube/URL directly - for source_path in download_url(audio_path, maxDuration=-1, destinationDirectory=output_dir, playlistItems=None): - source_name = os.path.basename(source_path) - sources.append({ "path": source_path, "name": source_name }) - else: - sources.append({ "path": audio_path, "name": os.path.basename(audio_path) }) - - for source in sources: - source_path = source["path"] - source_name = source["name"] - - result = transcriber.transcribe_file(model, source_path, temperature=temperature, - vad=vad, vadMergeWindow=vad_merge_window, vadMaxMergeSize=vad_max_merge_size, - vadPadding=vad_padding, vadPromptWindow=vad_prompt_window, **args) - - transcriber.write_result(result, source_name, output_dir) - - transcriber.close() - -def uri_validator(x): - try: - result = urlparse(x) - return all([result.scheme, result.netloc]) - except: - return False - -if __name__ == '__main__': - cli() \ No newline at end of file diff --git a/spaces/GXSA/bingo/src/components/ui/codeblock.tsx b/spaces/GXSA/bingo/src/components/ui/codeblock.tsx deleted file mode 100644 index aabda4e3b59f4e36b6ab79feb19d8d18b70e881b..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/components/ui/codeblock.tsx +++ /dev/null @@ -1,142 +0,0 @@ -'use client' - -import { FC, memo } from 'react' -import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter' -import { coldarkDark } from 'react-syntax-highlighter/dist/cjs/styles/prism' - -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' -import { IconCheck, IconCopy, IconDownload } from '@/components/ui/icons' -import { Button } from '@/components/ui/button' - -interface Props { - language: string - value: string -} - -interface languageMap { - [key: string]: string | undefined -} - -export const programmingLanguages: languageMap = { - javascript: '.js', - python: '.py', - java: '.java', - c: '.c', - cpp: '.cpp', - 'c++': '.cpp', - 'c#': '.cs', - ruby: '.rb', - php: '.php', - swift: '.swift', - 'objective-c': '.m', - kotlin: '.kt', - typescript: '.ts', - go: '.go', - perl: '.pl', - rust: '.rs', - 
scala: '.scala', - haskell: '.hs', - lua: '.lua', - shell: '.sh', - sql: '.sql', - html: '.html', - css: '.css' - // add more file extensions here, make sure the key is same as language prop in CodeBlock.tsx component -} - -export const generateRandomString = (length: number, lowercase = false) => { - const chars = 'ABCDEFGHJKLMNPQRSTUVWXY3456789' // excluding similar looking characters like Z, 2, I, 1, O, 0 - let result = '' - for (let i = 0; i < length; i++) { - result += chars.charAt(Math.floor(Math.random() * chars.length)) - } - return lowercase ? result.toLowerCase() : result -} - -const CodeBlock: FC = memo(({ language, value }) => { - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - - const downloadAsFile = () => { - if (typeof window === 'undefined') { - return - } - const fileExtension = programmingLanguages[language] || '.file' - const suggestedFileName = `file-${generateRandomString( - 3, - true - )}${fileExtension}` - const fileName = window.prompt('Enter file name' || '', suggestedFileName) - - if (!fileName) { - // User pressed cancel on prompt. - return - } - - const blob = new Blob([value], { type: 'text/plain' }) - const url = URL.createObjectURL(blob) - const link = document.createElement('a') - link.download = fileName - link.href = url - link.style.display = 'none' - document.body.appendChild(link) - link.click() - document.body.removeChild(link) - URL.revokeObjectURL(url) - } - - const onCopy = () => { - if (isCopied) return - copyToClipboard(value) - } - - return ( -
   <div className="relative w-full font-sans codeblock bg-zinc-950">
-      <div className="flex items-center justify-between w-full px-6 py-2 pr-4 bg-zinc-800 text-zinc-100">
-        <span className="text-xs lowercase">{language}</span>
-        <div className="flex items-center space-x-1">
-          <Button
-            variant="ghost"
-            onClick={downloadAsFile}
-            size="icon"
-          >
-            <IconDownload />
-            <span className="sr-only">Download</span>
-          </Button>
-          <Button
-            variant="ghost"
-            size="icon"
-            onClick={onCopy}
-          >
-            {isCopied ? <IconCheck /> : <IconCopy />}
-            <span className="sr-only">Copy code</span>
-          </Button>
-        </div>
-      </div>
-      <SyntaxHighlighter
-        language={language}
-        style={coldarkDark}
-        PreTag="div"
-        showLineNumbers
-        customStyle={{
-          margin: 0,
-          width: '100%',
-          background: 'transparent'
-        }}
-      >
-        {value}
-      </SyntaxHighlighter>
-    </div>
- ) -}) -CodeBlock.displayName = 'CodeBlock' - -export { CodeBlock } diff --git a/spaces/GaenKoki/voicevox/voicevox_engine/cancellable_engine.py b/spaces/GaenKoki/voicevox/voicevox_engine/cancellable_engine.py deleted file mode 100644 index 1bedb3ff3ebce858d8c585cf8b0d121a4d816210..0000000000000000000000000000000000000000 --- a/spaces/GaenKoki/voicevox/voicevox_engine/cancellable_engine.py +++ /dev/null @@ -1,220 +0,0 @@ -import argparse -import asyncio -import queue -from multiprocessing import Pipe, Process -from multiprocessing.connection import Connection -from tempfile import NamedTemporaryFile -from typing import List, Optional, Tuple - -import soundfile - -# FIXME: remove FastAPI dependency -from fastapi import HTTPException, Request - -from .model import AudioQuery -from .synthesis_engine import make_synthesis_engines -from .utility import get_latest_core_version - - -class CancellableEngine: - """ - 音声合成のキャンセル機能に関するクラス - 初期化後は、synthesis関数で音声合成できる - (オリジナルと比べ引数が増えているので注意) - - Attributes - ---------- - watch_con_list: List[Tuple[Request, Process]] - Requestは接続の監視に使用され、Processは通信切断時のプロセスキルに使用される - クライアントから接続があるとListにTupleが追加される - 接続が切断、もしくは音声合成が終了すると削除される - procs_and_cons: queue.Queue[Tuple[Process, Connection]] - 音声合成の準備が終わっているプロセスのList - (音声合成中のプロセスは入っていない) - """ - - def __init__(self, args: argparse.Namespace) -> None: - """ - 変数の初期化を行う - また、args.init_processesの数だけプロセスを起動し、procs_and_consに格納する - """ - self.args = args - if not self.args.enable_cancellable_synthesis: - raise HTTPException( - status_code=404, - detail="実験的機能はデフォルトで無効になっています。使用するには引数を指定してください。", - ) - - self.watch_con_list: List[Tuple[Request, Process]] = [] - self.procs_and_cons: queue.Queue[Tuple[Process, Connection]] = queue.Queue() - for _ in range(self.args.init_processes): - self.procs_and_cons.put(self.start_new_proc()) - - def start_new_proc( - self, - ) -> Tuple[Process, Connection]: - """ - 新しく開始したプロセスを返す関数 - - Returns - ------- - ret_proc: Process - 新規のプロセス - sub_proc_con1: Connection - ret_procのプロセスと通信するためのPipe - """ - sub_proc_con1, sub_proc_con2 = Pipe(True) - ret_proc = Process( - target=start_synthesis_subprocess, - kwargs={ - "args": self.args, - "sub_proc_con": sub_proc_con2, - }, - daemon=True, - ) - ret_proc.start() - return ret_proc, sub_proc_con1 - - def finalize_con( - self, - req: Request, - proc: Process, - sub_proc_con: Optional[Connection], - ) -> None: - """ - 接続が切断された時の処理を行う関数 - watch_con_listからの削除、プロセスの後処理を行う - プロセスが生きている場合はそのままprocs_and_consに加える - 死んでいる場合は新しく生成したものをprocs_and_consに加える - - Parameters - ---------- - req: fastapi.Request - 接続確立時に受け取ったものをそのまま渡せばよい - https://fastapi.tiangolo.com/advanced/using-request-directly/ - proc: Process - 音声合成を行っていたプロセス - sub_proc_con: Connection, optional - 音声合成を行っていたプロセスとのPipe - 指定されていない場合、プロセスは再利用されず終了される - """ - try: - self.watch_con_list.remove((req, proc)) - except ValueError: - pass - try: - if not proc.is_alive() or sub_proc_con is None: - proc.close() - raise ValueError - # プロセスが死んでいない場合は再利用する - self.procs_and_cons.put((proc, sub_proc_con)) - except ValueError: - # プロセスが死んでいるので新しく作り直す - self.procs_and_cons.put(self.start_new_proc()) - - def _synthesis_impl( - self, - query: AudioQuery, - speaker_id: int, - request: Request, - core_version: Optional[str], - ) -> str: - """ - 音声合成を行う関数 - 通常エンジンの引数に比べ、requestが必要になっている - また、返り値がファイル名になっている - - Parameters - ---------- - query: AudioQuery - speaker_id: int - request: fastapi.Request - 接続確立時に受け取ったものをそのまま渡せばよい - https://fastapi.tiangolo.com/advanced/using-request-directly/ - core_version: str - - 
Returns - ------- - f_name: str - 生成された音声ファイルの名前 - """ - proc, sub_proc_con1 = self.procs_and_cons.get() - self.watch_con_list.append((request, proc)) - try: - sub_proc_con1.send((query, speaker_id, core_version)) - f_name = sub_proc_con1.recv() - except EOFError: - raise HTTPException(status_code=422, detail="既にサブプロセスは終了されています") - except Exception: - self.finalize_con(request, proc, sub_proc_con1) - raise - - self.finalize_con(request, proc, sub_proc_con1) - return f_name - - async def catch_disconnection(self): - """ - 接続監視を行うコルーチン - """ - while True: - await asyncio.sleep(1) - for con in self.watch_con_list: - req, proc = con - if await req.is_disconnected(): - try: - if proc.is_alive(): - proc.terminate() - proc.join() - proc.close() - except ValueError: - pass - finally: - self.finalize_con(req, proc, None) - - -def start_synthesis_subprocess( - args: argparse.Namespace, - sub_proc_con: Connection, -): - """ - 音声合成を行うサブプロセスで行うための関数 - pickle化の関係でグローバルに書いている - - Parameters - ---------- - args: argparse.Namespace - 起動時に作られたものをそのまま渡す - sub_proc_con: Connection - メインプロセスと通信するためのPipe - """ - - synthesis_engines = make_synthesis_engines( - use_gpu=args.use_gpu, - voicelib_dirs=args.voicelib_dir, - voicevox_dir=args.voicevox_dir, - runtime_dirs=args.runtime_dir, - cpu_num_threads=args.cpu_num_threads, - enable_mock=args.enable_mock, - ) - assert len(synthesis_engines) != 0, "音声合成エンジンがありません。" - latest_core_version = get_latest_core_version(versions=synthesis_engines.keys()) - while True: - try: - query, speaker_id, core_version = sub_proc_con.recv() - if core_version is None: - _engine = synthesis_engines[latest_core_version] - elif core_version in synthesis_engines: - _engine = synthesis_engines[core_version] - else: - # バージョンが見つからないエラー - sub_proc_con.send("") - continue - wave = _engine._synthesis_impl(query, speaker_id) - with NamedTemporaryFile(delete=False) as f: - soundfile.write( - file=f, data=wave, samplerate=query.outputSamplingRate, format="WAV" - ) - sub_proc_con.send(f.name) - except Exception: - sub_proc_con.close() - raise diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/symmetric_block_bridge_construction.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/symmetric_block_bridge_construction.py deleted file mode 100644 index 29c8bef1b472b9f3b04149815e7caf3d1c3439a6..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/symmetric_block_bridge_construction.py +++ /dev/null @@ -1,87 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils -import pybullet as p - -class SymmetricBlockBridgeConstruction(Task): - """Create a symmetrical bridge-shaped structure on a stand using eight blocks of two different colors (four red and four blue).""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.lang_template = "create a symmetrical bridge-shaped structure on a stand using eight blocks of two different colors (four red and four blue)" - self.task_completed_desc = "done building the bridge." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add base. 
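-        # The stand ('stacking/stand.urdf') is added as a fixed object; the
-        # eight colored blocks below are stacked onto it to form the bridge.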
- base_size = (0.05, 0.15, 0.005) - base_urdf = 'stacking/stand.urdf' - base_pose = self.get_random_pose(env, base_size) - env.add_object(base_urdf, base_pose, category='fixed') - - # Block colors. - colors = [utils.COLORS['red'], utils.COLORS['blue']] - - # Add blocks. - # x, y, z dimensions for the asset size - block_size = (0.04, 0.04, 0.04) - block_urdf = 'stacking/block.urdf' - - objs = [] - for i in range(8): - block_pose = self.get_random_pose(env, block_size) - block_id = env.add_object(block_urdf, block_pose, color=colors[i%2]) - objs.append(block_id) - - # Associate placement locations for goals. - place_pos = [(0, -0.05, 0.03), (0, 0, 0.03), - (0, 0.05, 0.03), (0, -0.025, 0.08), - (0, 0.025, 0.08), (0, 0, 0.13), - (0, -0.025, 0.18), (0, 0.025, 0.18)] - targs = [(utils.apply(base_pose, i), base_pose[1]) for i in place_pos] - - # Goal: blocks are stacked in a bridge (bottom row: red, red). - self.add_goal(objs=objs[:2], matches=np.ones((2, 2)), targ_poses=targs[:2], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 4, symmetries=[np.pi/2]*2, - language_goal=self.lang_template) - - # Goal: blocks are stacked in a bridge (second row: blue). - self.add_goal(objs=objs[2:3], matches=np.ones((1, 1)), targ_poses=targs[2:3], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 4, symmetries=[np.pi/2], - language_goal=self.lang_template) - - # Goal: blocks are stacked in a bridge (third row: red). - self.add_goal(objs=objs[3:4], matches=np.ones((1, 1)), targ_poses=targs[3:4], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 4, symmetries=[np.pi/2], - language_goal=self.lang_template) - - # Goal: blocks are stacked in a bridge (fourth row: blue). - self.add_goal(objs=objs[4:5], matches=np.ones((1, 1)), targ_poses=targs[4:5], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 4, symmetries=[np.pi/2], - language_goal=self.lang_template) - - # Goal: blocks are stacked in a bridge (fifth row: red). - self.add_goal(objs=objs[5:6], matches=np.ones((1, 1)), targ_poses=targs[5:6], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 4, symmetries=[np.pi/2], - language_goal=self.lang_template) - - # Goal: blocks are stacked in a bridge (sixth row: blue). - self.add_goal(objs=objs[6:7], matches=np.ones((1, 1)), targ_poses=targs[6:7], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 4, symmetries=[np.pi/2], - language_goal=self.lang_template) - - # Goal: blocks are stacked in a bridge (top row: red, red). - self.add_goal(objs=objs[7:], matches=np.ones((1, 1)), targ_poses=targs[7:], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 4, symmetries=[np.pi/2], - language_goal=self.lang_template) \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/traintest_scripts/train_test_multi_task_goal_small.sh b/spaces/Gen-Sim/Gen-Sim/scripts/traintest_scripts/train_test_multi_task_goal_small.sh deleted file mode 100644 index 5a2fd6d956012f8e014fd90625c5a64e34bb3889..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/scripts/traintest_scripts/train_test_multi_task_goal_small.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -DATA_DIR=$1 -TRAINTASK=${2-'[rainbow-stack,bowl-ball-placement]'} -TESTTASK=${3-'[rainbow-stack,bowl-ball-placement]'} -TASKNAME=${4-'mix-two'} -STEPS=${5-'10000'} - -DISP=False - -echo "Training multi-task dataset... 
Folder: $DATA_DIR Task $TRAINTASK" - -# You can parallelize these depending on how much resources you have - -############################# -## Language-Conditioned Tasks -# [align-rope,assembling-kits-seq-seen-colors,assembling-kits-seq-unseen-colors,packing-shapes,stack-block-pyramid-seq-unseen-colors, -# separating-piles-seen-colors,separating-piles-unseen-colors,towers-of-hanoi-seq-seen-colors,towers-of-hanoi-seq-unseen-colors] - -# example: sh scripts/traintest_scripts/train_test_multi_task_indistribution.sh data "[align-rope,sweeping-piles,align-box-corner,block-insertion,manipulating-rope,place-red-in-green]" 6taskindomain -# sh scripts/traintest_scripts/train_test_multi_task_goal.sh data "[align-rope,sweeping-piles,align-box-corner,block-insertion,manipulating-rope,place-red-in-green]" "[towers-of-hanoi]" 6taskgen -# sh scripts/traintest_scripts/train_test_multi_task_goal.sh data "[align-rope,sweeping-piles,align-box-corner]" "[towers-of-hanoi]" 3taskgen -# sh scripts/traintest_scripts/train_test_multi_task_goal.sh data "[align-rope]" "[towers-of-hanoi]" 1taskgen -# sh scripts/traintest_scripts/train_test_multi_task_goal.sh data "[align-rope,sweeping-piles,align-box-corner,block-insertion,manipulating-rope,place-red-in-green]" "[towers-of-hanoi]" 10taskgen - -trap "kill 0" SIGINT -# increase the nums of epoch -python cliport/train.py train.task=$TRAINTASK \ - train.agent=cliport \ - train.model_task=$TASKNAME \ - train.attn_stream_fusion_type=add \ - train.trans_stream_fusion_type=conv \ - train.lang_fusion_type=mult \ - train.n_demos=50 \ - train.n_steps=${STEPS} \ - dataset.cache=True \ - train.exp_folder=exps/exp-$TASKNAME-small \ - dataset.type=multi \ - train.load_from_last_ckpt=False - - -# Convert Python list to Bash array -bash_array=$(python3 -c "import sys; print(' '.join((sys.argv[1])[1:-1].split(',')))" "$TESTTASK") - -# Convert the space-separated string to a bash array -echo "Testing multi-task dataset... Folder: $DATA_DIR Task $TESTTASK" - - -for task in $bash_array - do - echo "Testing $task" - # TEST - # bash scripts/generate_gpt_datasets.sh data $task - - python cliport/eval.py model_task=$TASKNAME \ - eval_task=$task \ - agent=cliport \ - mode=test \ - n_demos=100 \ - train_demos=50 \ - checkpoint_type=test_best \ - type=single \ - exp_folder=exps/exp-$TASKNAME-small \ - update_results=True & - done -wait - -python notebooks/print_results.py -r=exps/exp-$TASKNAME-small - -echo "Finished Training." 
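For reference, the inline `python3 -c` one-liner that bridges the Python-style task list into a bash-iterable string can be read more easily as a standalone script. This is a sketch of the same logic, not an extra file that exists in the repo:

```python
import sys

# "[align-rope,sweeping-piles]"  ->  "align-rope sweeping-piles"
task_list = sys.argv[1]
print(" ".join(task_list[1:-1].split(",")))
```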
\ No newline at end of file diff --git "a/spaces/Gmq-x/gpt-academic/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" "b/spaces/Gmq-x/gpt-academic/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" deleted file mode 100644 index a564f21d231cd65c29b539573929ca5d2df63203..0000000000000000000000000000000000000000 --- "a/spaces/Gmq-x/gpt-academic/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" +++ /dev/null @@ -1,54 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -fast_debug = False - -def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, os - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - - i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```' - i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # 带超时倒计时 - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - if not fast_debug: time.sleep(2) - - if not fast_debug: - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - - - -@CatchException -def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] - - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/README.md b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/README.md deleted file mode 100644 index 7b166152fdfc5464fb7dd5e39c678cd735294b27..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# Asymmetric Non-local Neural Networks for Semantic Segmentation - -## Introduction - - - -```latex -@inproceedings{annn, - author = {Zhen Zhu and - Mengde Xu and - Song Bai and - Tengteng Huang and - Xiang Bai}, - title = {Asymmetric Non-local Neural Networks for 
Semantic Segmentation}, - booktitle={International Conference on Computer Vision}, - year = {2019}, - url = {http://arxiv.org/abs/1908.07678}, -} -``` - -## Results and models - -### Cityscapes - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| ANN | R-50-D8 | 512x1024 | 40000 | 6 | 3.71 | 77.40 | 78.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211.log.json) | -| ANN | R-101-D8 | 512x1024 | 40000 | 9.5 | 2.55 | 76.55 | 78.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243-adf6eece.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243.log.json) | -| ANN | R-50-D8 | 769x769 | 40000 | 6.8 | 1.70 | 78.89 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712-2b46b04d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712.log.json) | -| ANN | R-101-D8 | 769x769 | 40000 | 10.7 | 1.15 | 79.32 | 80.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720-059bff28.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720.log.json) | -| ANN | R-50-D8 | 512x1024 | 80000 | - | - | 77.34 | 78.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911-5a9ad545.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911.log.json) | -| ANN | R-101-D8 | 512x1024 | 80000 | - | - | 77.14 | 78.81 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728-aceccc6e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728.log.json) | -| ANN | R-50-D8 | 769x769 | 80000 | - | - | 78.88 | 80.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426-cc7ff323.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426.log.json) | -| ANN | R-101-D8 | 769x769 | 80000 | - | - | 78.80 | 80.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713-a9d4be8d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713.log.json) | - -### ADE20K - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| ANN | R-50-D8 | 512x512 | 80000 | 9.1 | 21.01 | 41.01 | 42.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818-26f75e11.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818.log.json) | -| ANN | R-101-D8 | 512x512 | 80000 | 12.5 | 14.12 | 42.94 | 44.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818-c0153543.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818.log.json) | -| ANN | R-50-D8 | 512x512 | 160000 | - | - | 41.74 | 42.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733-892247bc.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733.log.json) | -| ANN | R-101-D8 | 512x512 | 160000 | - | - | 42.94 | 44.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733-955eb1ec.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733.log.json) | - -### Pascal VOC 2012 + Aug - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| ANN | R-50-D8 | 512x512 | 20000 | 6 | 20.92 | 74.86 | 76.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246-dfcb1c62.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246.log.json) | -| ANN | R-101-D8 | 512x512 | 20000 | 9.5 | 13.94 | 77.47 | 78.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246-2fad0042.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246.log.json) | -| ANN | R-50-D8 | 512x512 | 40000 | - | - | 76.56 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314-b5dac322.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314.log.json) | -| ANN | R-101-D8 | 512x512 | 40000 | - | - | 76.70 | 78.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314-bd205bbe.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314.log.json) | diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/conditioners.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/conditioners.py deleted file mode 100644 index 
d10ac8dc96466375379c883cd62f7c04a1bb0a73..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/conditioners.py +++ /dev/null @@ -1,1411 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from collections import defaultdict -from copy import deepcopy -from dataclasses import dataclass, field -from itertools import chain -import logging -import math -from pathlib import Path -import random -import re -import typing as tp -import warnings - -import einops -from num2words import num2words -import spacy -from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer # type: ignore -import torch -from torch import nn -import torch.nn.functional as F -from torch.nn.utils.rnn import pad_sequence - -from .chroma import ChromaExtractor -from .streaming import StreamingModule -from .transformer import create_sin_embedding -from ..data.audio import audio_read -from ..data.audio_dataset import SegmentInfo -from ..data.audio_utils import convert_audio -from ..environment import AudioCraftEnvironment -from ..quantization import ResidualVectorQuantizer -from ..utils.autocast import TorchAutocast -from ..utils.cache import EmbeddingCache -from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once - - -logger = logging.getLogger(__name__) -TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) -ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask - - -class WavCondition(tp.NamedTuple): - wav: torch.Tensor - length: torch.Tensor - sample_rate: tp.List[int] - path: tp.List[tp.Optional[str]] = [] - seek_time: tp.List[tp.Optional[float]] = [] - - -class JointEmbedCondition(tp.NamedTuple): - wav: torch.Tensor - text: tp.List[tp.Optional[str]] - length: torch.Tensor - sample_rate: tp.List[int] - path: tp.List[tp.Optional[str]] = [] - seek_time: tp.List[tp.Optional[float]] = [] - - -@dataclass -class ConditioningAttributes: - text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) - wav: tp.Dict[str, WavCondition] = field(default_factory=dict) - joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) - - def __getitem__(self, item): - return getattr(self, item) - - @property - def text_attributes(self): - return self.text.keys() - - @property - def wav_attributes(self): - return self.wav.keys() - - @property - def joint_embed_attributes(self): - return self.joint_embed.keys() - - @property - def attributes(self): - return { - "text": self.text_attributes, - "wav": self.wav_attributes, - "joint_embed": self.joint_embed_attributes, - } - - def to_flat_dict(self): - return { - **{f"text.{k}": v for k, v in self.text.items()}, - **{f"wav.{k}": v for k, v in self.wav.items()}, - **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} - } - - @classmethod - def from_flat_dict(cls, x): - out = cls() - for k, v in x.items(): - kind, att = k.split(".") - out[kind][att] = v - return out - - -class SegmentWithAttributes(SegmentInfo): - """Base class for all dataclasses that are used for conditioning. - All child classes should implement `to_condition_attributes` that converts - the existing attributes to a dataclass of type ConditioningAttributes. 
- """ - def to_condition_attributes(self) -> ConditioningAttributes: - raise NotImplementedError() - - -def nullify_condition(condition: ConditionType, dim: int = 1): - """Transform an input condition to a null condition. - The way it is done by converting it to a single zero vector similarly - to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. - - Args: - condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) - dim (int): The dimension that will be truncated (should be the time dimension) - WARNING!: dim should not be the batch dimension! - Returns: - ConditionType: A tuple of null condition and mask - """ - assert dim != 0, "dim cannot be the batch dimension!" - assert isinstance(condition, tuple) and \ - isinstance(condition[0], torch.Tensor) and \ - isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" - cond, mask = condition - B = cond.shape[0] - last_dim = cond.dim() - 1 - out = cond.transpose(dim, last_dim) - out = 0. * out[..., :1] - out = out.transpose(dim, last_dim) - mask = torch.zeros((B, 1), device=out.device).int() - assert cond.dim() == out.dim() - return out, mask - - -def nullify_wav(cond: WavCondition) -> WavCondition: - """Transform a WavCondition to a nullified WavCondition. - It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. - - Args: - cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. - Returns: - WavCondition: Nullified wav condition. - """ - null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) - return WavCondition( - wav=null_wav, - length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), - sample_rate=cond.sample_rate, - path=[None] * cond.wav.shape[0], - seek_time=[None] * cond.wav.shape[0], - ) - - -def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: - """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, - and replacing metadata by dummy attributes. - - Args: - cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. - """ - null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) - return JointEmbedCondition( - wav=null_wav, text=[None] * len(embed.text), - length=torch.LongTensor([0]).to(embed.wav.device), - sample_rate=embed.sample_rate, - path=[None] * embed.wav.shape[0], - seek_time=[0] * embed.wav.shape[0], - ) - - -class Tokenizer: - """Base tokenizer implementation - (in case we want to introduce more advances tokenizers in the future). - """ - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - raise NotImplementedError() - - -class WhiteSpaceTokenizer(Tokenizer): - """This tokenizer should be used for natural language descriptions. 
- For example: - ["he didn't, know he's going home.", 'shorter sentence'] => - [[78, 62, 31, 4, 78, 25, 19, 34], - [59, 77, 0, 0, 0, 0, 0, 0]] - """ - PUNCTUATION = "?:!.,;" - - def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", - lemma: bool = True, stopwords: bool = True) -> None: - self.n_bins = n_bins - self.pad_idx = pad_idx - self.lemma = lemma - self.stopwords = stopwords - try: - self.nlp = spacy.load(language) - except IOError: - spacy.cli.download(language) # type: ignore - self.nlp = spacy.load(language) - - @tp.no_type_check - def __call__(self, texts: tp.List[tp.Optional[str]], - return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Take a list of strings and convert them to a tensor of indices. - - Args: - texts (list[str]): List of strings. - return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. - Returns: - tuple[torch.Tensor, torch.Tensor]: - - Indices of words in the LUT. - - And a mask indicating where the padding tokens are - """ - output, lengths = [], [] - texts = deepcopy(texts) - for i, text in enumerate(texts): - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(torch.Tensor([self.pad_idx])) - lengths.append(0) - continue - - # convert numbers to words - text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore - # normalize text - text = self.nlp(text) # type: ignore - # remove stopwords - if self.stopwords: - text = [w for w in text if not w.is_stop] # type: ignore - # remove punctuation - text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore - # lemmatize if needed - text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore - - texts[i] = " ".join(text) - lengths.append(len(text)) - # convert to tensor - tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) - output.append(tokens) - - mask = length_to_mask(torch.IntTensor(lengths)).int() - padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() - if return_text: - return padded_output, mask, texts # type: ignore - return padded_output, mask - - -class NoopTokenizer(Tokenizer): - """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. - The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split - strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will - split it to ["Jeff", "Buckley"] and return an index per word. - - For example: - ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] - ["Metal", "Rock", "Classical"] => [0, 223, 51] - """ - def __init__(self, n_bins: int, pad_idx: int = 0): - self.n_bins = n_bins - self.pad_idx = pad_idx - - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - output, lengths = [], [] - for text in texts: - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(self.pad_idx) - lengths.append(0) - else: - output.append(hash_trick(text, self.n_bins)) - lengths.append(1) - - tokens = torch.LongTensor(output).unsqueeze(1) - mask = length_to_mask(torch.IntTensor(lengths)).int() - return tokens, mask - - -class BaseConditioner(nn.Module): - """Base model for all conditioner modules. 
- We allow the output dim to be different than the hidden dim for two reasons: - 1) keep our LUTs small when the vocab is large; - 2) make all condition dims consistent. - - Args: - dim (int): Hidden dim of the model. - output_dim (int): Output dim of the conditioner. - """ - def __init__(self, dim: int, output_dim: int): - super().__init__() - self.dim = dim - self.output_dim = output_dim - self.output_proj = nn.Linear(dim, output_dim) - - def tokenize(self, *args, **kwargs) -> tp.Any: - """Should be any part of the processing that will lead to a synchronization - point, e.g. BPE tokenization with transfer to the GPU. - - The returned value will be saved and return later when calling forward(). - """ - raise NotImplementedError() - - def forward(self, inputs: tp.Any) -> ConditionType: - """Gets input that should be used as conditioning (e.g, genre, description or a waveform). - Outputs a ConditionType, after the input data was embedded as a dense vector. - - Returns: - ConditionType: - - A tensor of size [B, T, D] where B is the batch size, T is the length of the - output embedding and D is the dimension of the embedding. - - And a mask indicating where the padding tokens. - """ - raise NotImplementedError() - - -class TextConditioner(BaseConditioner): - ... - - -class LUTConditioner(TextConditioner): - """Lookup table TextConditioner. - - Args: - n_bins (int): Number of bins. - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - tokenizer (str): Name of the tokenizer. - pad_idx (int, optional): Index for padding token. Defaults to 0. - """ - def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): - super().__init__(dim, output_dim) - self.embed = nn.Embedding(n_bins, dim) - self.tokenizer: Tokenizer - if tokenizer == 'whitespace': - self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) - elif tokenizer == 'noop': - self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) - else: - raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - device = self.embed.weight.device - tokens, mask = self.tokenizer(x) - tokens, mask = tokens.to(device), mask.to(device) - return tokens, mask - - def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: - tokens, mask = inputs - embeds = self.embed(tokens) - embeds = self.output_proj(embeds) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class T5Conditioner(TextConditioner): - """T5-based TextConditioner. - - Args: - name (str): Name of the T5 model. - output_dim (int): Output dim of the conditioner. - finetune (bool): Whether to fine-tune T5 at train time. - device (str): Device for T5 Conditioner. - autocast_dtype (tp.Optional[str], optional): Autocast dtype. - word_dropout (float, optional): Word dropout probability. - normalize_text (bool, optional): Whether to apply text normalization. 
- """ - MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", - "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", - "google/flan-t5-xl", "google/flan-t5-xxl"] - MODELS_DIMS = { - "t5-small": 512, - "t5-base": 768, - "t5-large": 1024, - "t5-3b": 1024, - "t5-11b": 1024, - "google/flan-t5-small": 512, - "google/flan-t5-base": 768, - "google/flan-t5-large": 1024, - "google/flan-t5-3b": 1024, - "google/flan-t5-11b": 1024, - } - - def __init__(self, name: str, output_dim: int, finetune: bool, device: str, - autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., - normalize_text: bool = False): - assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" - super().__init__(self.MODELS_DIMS[name], output_dim) - self.device = device - self.name = name - self.finetune = finetune - self.word_dropout = word_dropout - if autocast_dtype is None or self.device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - if self.device != 'cpu': - logger.warning("T5 has no autocast, this might lead to NaN") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # Let's disable logging temporarily because T5 will vomit some errors otherwise. - # thanks https://gist.github.com/simon-weber/7853144 - previous_level = logging.root.manager.disable - logging.disable(logging.ERROR) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - try: - self.t5_tokenizer = T5Tokenizer.from_pretrained(name) - t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) - finally: - logging.disable(previous_level) - if finetune: - self.t5 = t5 - else: - # this makes sure that the t5 models is not part - # of the saved checkpoint - self.__dict__['t5'] = t5.to(device) - - self.normalize_text = normalize_text - if normalize_text: - self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: - # if current sample doesn't have a certain attribute, replace with empty string - entries: tp.List[str] = [xi if xi is not None else "" for xi in x] - if self.normalize_text: - _, _, entries = self.text_normalizer(entries, return_text=True) - if self.word_dropout > 0. and self.training: - new_entries = [] - for entry in entries: - words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] - new_entries.append(" ".join(words)) - entries = new_entries - - empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) - - inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) - mask = inputs['attention_mask'] - mask[empty_idx, :] = 0 # zero-out index where the input is non-existant - return inputs - - def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: - mask = inputs['attention_mask'] - with torch.set_grad_enabled(self.finetune), self.autocast: - embeds = self.t5(**inputs).last_hidden_state - embeds = self.output_proj(embeds.to(self.output_proj.weight)) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class WaveformConditioner(BaseConditioner): - """Base class for all conditioners that take a waveform as input. 
-
-
-class WaveformConditioner(BaseConditioner):
-    """Base class for all conditioners that take a waveform as input.
-    Classes that inherit must implement `_get_wav_embedding` that outputs
-    a continuous tensor, and `_downsampling_factor` that returns the down-sampling
-    factor of the embedding model.
-
-    Args:
-        dim (int): The internal representation dimension.
-        output_dim (int): Output dimension.
-        device (tp.Union[torch.device, str]): Device.
-    """
-    def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]):
-        super().__init__(dim, output_dim)
-        self.device = device
-
-    def tokenize(self, x: WavCondition) -> WavCondition:
-        wav, length, sample_rate, path, seek_time = x
-        assert length is not None
-        return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time)
-
-    def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor:
-        """Gets as input a WavCondition and returns a dense embedding."""
-        raise NotImplementedError()
-
-    def _downsampling_factor(self):
-        """Returns the downsampling factor of the embedding model."""
-        raise NotImplementedError()
-
-    def forward(self, x: WavCondition) -> ConditionType:
-        """Extract condition embedding and mask from a waveform and its metadata.
-        Args:
-            x (WavCondition): Waveform condition containing raw waveform and metadata.
-        Returns:
-            ConditionType: a dense vector representing the conditioning along with its mask.
-        """
-        wav, lengths, *_ = x
-        with torch.no_grad():
-            embeds = self._get_wav_embedding(x)
-        embeds = embeds.to(self.output_proj.weight)
-        embeds = self.output_proj(embeds)
-
-        if lengths is not None:
-            lengths = lengths / self._downsampling_factor()
-            mask = length_to_mask(lengths, max_len=embeds.shape[1]).int()  # type: ignore
-        else:
-            # keep the mask 2D [B, T] so the unsqueeze below broadcasts against [B, T, D]
-            mask = torch.ones_like(embeds[..., 0])
-        embeds = (embeds * mask.unsqueeze(2).to(self.device))
-
-        return embeds, mask
-
-
-class ChromaStemConditioner(WaveformConditioner):
-    """Chroma conditioner based on stems.
-    The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as
-    the drums and bass often dominate the chroma, leading to the chroma features
-    not containing information about the melody.
-
-    Args:
-        output_dim (int): Output dimension for the conditioner.
-        sample_rate (int): Sample rate for the chroma extractor.
-        n_chroma (int): Number of chroma bins for the chroma extractor.
-        radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12).
-        duration (float): duration used during training. This is later used for correct padding
-            in case we are using chroma as prefix.
-        match_len_on_eval (bool, optional): if True then all chromas are padded to the training
-            duration. Defaults to True.
-        eval_wavs (str, optional): path to a dataset manifest with waveforms; these waveforms are
-            used as conditions during eval (for cases where we don't want to leak test conditions
-            like MusicCaps). Defaults to None.
-        n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0.
-        cache_path (str or Path, optional): path for pre-computed embedding caching.
-        device (tp.Union[torch.device, str], optional): Device for the conditioner.
-        **kwargs: Additional parameters for the chroma extractor.
- """ - def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, - duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, - n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, - device: tp.Union[torch.device, str] = 'cpu', **kwargs): - from demucs import pretrained - super().__init__(dim=n_chroma, output_dim=output_dim, device=device) - self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) - self.sample_rate = sample_rate - self.match_len_on_eval = match_len_on_eval - self.duration = duration - self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) - stem_sources: list = self.demucs.sources # type: ignore - self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) - self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, - radix2_exp=radix2_exp, **kwargs).to(device) - self.chroma_len = self._get_chroma_len() - self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) - self.cache = None - if cache_path is not None: - self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, - compute_embed_fn=self._get_full_chroma_for_cache, - extract_embed_fn=self._extract_chroma_chunk) - - def _downsampling_factor(self) -> int: - return self.chroma.winhop - - def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: - """Load pre-defined waveforms from a json. - These waveforms will be used for chroma extraction during evaluation. - This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). - """ - if path is None: - return None - - logger.info(f"Loading evaluation wavs from {path}") - from audiocraft.data.audio_dataset import AudioDataset - dataset: AudioDataset = AudioDataset.from_meta( - path, segment_duration=self.duration, min_audio_duration=self.duration, - sample_rate=self.sample_rate, channels=1) - - if len(dataset) > 0: - eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) - logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") - return eval_wavs - else: - raise ValueError("Could not find evaluation wavs, check lengths of wavs") - - def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: - self.eval_wavs = eval_wavs - - def has_eval_wavs(self) -> bool: - return self.eval_wavs is not None - - def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: - """Sample wavs from a predefined list.""" - assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." 
- total_eval_wavs = len(self.eval_wavs) - out = self.eval_wavs - if num_samples > total_eval_wavs: - out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) - return out[torch.randperm(len(out))][:num_samples] - - def _get_chroma_len(self) -> int: - """Get length of chroma during training.""" - dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) - dummy_chr = self.chroma(dummy_wav) - return dummy_chr.shape[1] - - @torch.no_grad() - def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: - """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" - from demucs.apply import apply_model - from demucs.audio import convert_audio - with self.autocast: - wav = convert_audio( - wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore - stems = apply_model(self.demucs, wav, device=self.device) - stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning - mix_wav = stems.sum(1) # merge extracted stems to single waveform - mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore - return mix_wav - - @torch.no_grad() - def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: - """Extract chroma features from the waveform.""" - with self.autocast: - return self.chroma(wav) - - @torch.no_grad() - def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: - """Compute wav embedding, applying stem and chroma extraction.""" - # avoid 0-size tensors when we are working with null conds - if wav.shape[-1] == 1: - return self._extract_chroma(wav) - stems = self._get_stemmed_wav(wav, sample_rate) - chroma = self._extract_chroma(stems) - return chroma - - @torch.no_grad() - def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: - """Extract chroma from the whole audio waveform at the given path.""" - wav, sr = audio_read(path) - wav = wav[None].to(self.device) - wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) - chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] - return chroma - - def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: - """Extract a chunk of chroma from the full chroma derived from the full waveform.""" - wav_length = x.wav.shape[-1] - seek_time = x.seek_time[idx] - assert seek_time is not None, ( - "WavCondition seek_time is required " - "when extracting chroma chunks from pre-computed chroma.") - full_chroma = full_chroma.float() - frame_rate = self.sample_rate / self._downsampling_factor() - target_length = int(frame_rate * wav_length / self.sample_rate) - index = int(frame_rate * seek_time) - out = full_chroma[index: index + target_length] - out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] - return out.to(self.device) - - @torch.no_grad() - def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: - """Get the wav embedding from the WavCondition. - The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly - or will rely on the embedding cache to load the pre-computed embedding if relevant. 
- """ - sampled_wav: tp.Optional[torch.Tensor] = None - if not self.training and self.eval_wavs is not None: - warn_once(logger, "Using precomputed evaluation wavs!") - sampled_wav = self._sample_eval_wavs(len(x.wav)) - - no_undefined_paths = all(p is not None for p in x.path) - no_nullified_cond = x.wav.shape[-1] > 1 - if sampled_wav is not None: - chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) - elif self.cache is not None and no_undefined_paths and no_nullified_cond: - paths = [Path(p) for p in x.path if p is not None] - chroma = self.cache.get_embed_from_cache(paths, x) - else: - assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." - chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) - - if self.match_len_on_eval: - B, T, C = chroma.shape - if T > self.chroma_len: - chroma = chroma[:, :self.chroma_len] - logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") - elif T < self.chroma_len: - n_repeat = int(math.ceil(self.chroma_len / T)) - chroma = chroma.repeat(1, n_repeat, 1) - chroma = chroma[:, :self.chroma_len] - logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") - - return chroma - - def tokenize(self, x: WavCondition) -> WavCondition: - """Apply WavConditioner tokenization and populate cache if needed.""" - x = super().tokenize(x) - no_undefined_paths = all(p is not None for p in x.path) - if self.cache is not None and no_undefined_paths: - paths = [Path(p) for p in x.path if p is not None] - self.cache.populate_embed_cache(paths, x) - return x - - -class JointEmbeddingConditioner(BaseConditioner): - """Joint embedding conditioning supporting both audio or text conditioning. - - Args: - dim (int): Dimension. - output_dim (int): Output dimension. - device (str): Device. - attribute (str): Attribute used by the conditioner. - autocast_dtype (str): Autocast for the conditioner. - quantize (bool): Whether to quantize the CLAP embedding. - n_q (int): Number of residual quantizers (used if quantize is true). - bins (int): Quantizers' codebooks size (used if quantize is true). - kwargs: Additional parameters for residual vector quantizer. - """ - def __init__(self, dim: int, output_dim: int, device: str, attribute: str, - autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True, - n_q: int = 12, bins: int = 1024, **kwargs): - super().__init__(dim=dim, output_dim=output_dim) - self.device = device - self.attribute = attribute - if autocast_dtype is None or device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # residual vector quantizer to discretize the conditioned embedding - self.quantizer: tp.Optional[ResidualVectorQuantizer] = None - if quantize: - self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs) - - def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Get joint embedding in latent space from the inputs. - - Returns: - tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding - and corresponding empty indexes. 
- """ - raise NotImplementedError() - - def forward(self, x: JointEmbedCondition) -> ConditionType: - with self.autocast: - embed, empty_idx = self._get_embed(x) - if self.quantizer is not None: - embed = embed.view(-1, self.dim, 1) - q_res = self.quantizer(embed, frame_rate=1) - out_embed = q_res.x.view(-1, self.dim) - else: - out_embed = embed - out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim) - mask = torch.ones(*out_embed.shape[:2], device=out_embed.device) - mask[empty_idx, :] = 0 # zero-out index where the input is non-existant - out_embed = (out_embed * mask.unsqueeze(-1)) - return out_embed, mask - - def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: - return x - - -class CLAPEmbeddingConditioner(JointEmbeddingConditioner): - """Joint Embedding conditioner based on pre-trained CLAP model. - - This CLAP-based conditioner supports a caching mechanism - over the computed embeddings for faster training. - - Args: - dim (int): Dimension. - output_dim (int): Output dimension. - device (str): Device. - attribute (str): Attribute used by the conditioner. - quantize (bool): Whether to quantize the CLAP embedding. - n_q (int): Number of residual quantizers (used if quantize is true). - bins (int): Quantizers' codebooks size (used if quantize is true). - checkpoint (str): Path to CLAP checkpoint. - model_arch (str): CLAP model architecture. - enable_fusion (bool): Enable fusion for CLAP model. - sample_rate (int): Sample rate used by CLAP model. - max_audio_length (float): Maximum audio length for CLAP model. - audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence. - normalize (bool): Whether to normalize the CLAP embedding. - text_p (float): Probability of using text representation instead of audio at train time. - batch_size (Optional[int]): Batch size for CLAP embedding computation. - autocast_dtype (str): Autocast for the conditioner. - cache_path (Optional[str]): Path for pre-computed embeddings caching. - kwargs: Additional parameters for residual vector quantizer. 
- """ - def __init__(self, dim: int, output_dim: int, device: str, attribute: str, - quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str, - enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int, - normalize: bool, text_p: bool, batch_size: tp.Optional[int] = None, - autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs): - try: - import laion_clap # type: ignore - except ImportError: - raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'") - checkpoint = AudioCraftEnvironment.resolve_reference_path(checkpoint) - clap_tokenize = RobertaTokenizer.from_pretrained('roberta-base') - clap_model = laion_clap.CLAP_Module(enable_fusion=enable_fusion, amodel=model_arch) - load_clap_state_dict(clap_model, checkpoint) - clap_model.eval() - clap_model.to(device) - super().__init__(dim=dim, output_dim=output_dim, device=device, attribute=attribute, - autocast_dtype=autocast_dtype, quantize=quantize, n_q=n_q, bins=bins, - **kwargs) - self.checkpoint = checkpoint - self.enable_fusion = enable_fusion - self.model_arch = model_arch - self.clap: laion_clap.CLAP_Module - self.clap_tokenize: RobertaTokenizer - self.clap_sample_rate = sample_rate - self.clap_max_frames = int(self.clap_sample_rate * max_audio_length) - self.clap_stride = int(self.clap_sample_rate * audio_stride) - self.batch_size = batch_size or 1 - self.normalize = normalize - self.text_p = text_p - self.__dict__['clap_tokenize'] = clap_tokenize - self.__dict__['clap'] = clap_model - self.wav_cache, self.text_cache = None, None - if cache_path is not None: - self.wav_cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, - compute_embed_fn=self._get_wav_embedding_for_cache, - extract_embed_fn=self._extract_wav_embedding_chunk) - self.text_cache = EmbeddingCache(Path(cache_path) / 'text', self.device, - compute_embed_fn=self._get_text_embedding_for_cache) - - def _tokenizer(self, texts: tp.Union[str, tp.List[str]]) -> dict: - # we use the default params from CLAP module here as well - return self.clap_tokenize(texts, padding="max_length", truncation=True, max_length=77, return_tensors="pt") - - def _compute_text_embedding(self, text: tp.List[str]) -> torch.Tensor: - """Compute text embedding from CLAP model on a given a batch of text. - - Args: - text (list[str]): List of text for the batch, with B items. - Returns: - torch.Tensor: CLAP embedding derived from text, of shape [B, 1, D], with D the CLAP embedding dimension. - """ - with torch.no_grad(): - embed = self.clap.get_text_embedding(text, tokenizer=self._tokenizer, use_tensor=True) - return embed.view(embed.size(0), 1, embed.size(-1)) - - def _get_text_embedding_for_cache(self, path: tp.Union[Path, str], - x: JointEmbedCondition, idx: int) -> torch.Tensor: - """Get text embedding function for the cache.""" - text = x.text[idx] - text = text if text is not None else "" - return self._compute_text_embedding([text])[0] - - def _preprocess_wav(self, wav: torch.Tensor, length: torch.Tensor, sample_rates: tp.List[int]) -> torch.Tensor: - """Preprocess wav to expected format by CLAP model. - - Args: - wav (torch.Tensor): Audio wav, of shape [B, C, T]. - length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B]. - sample_rates (list[int]): Sample rates for each sample in the batch - Returns: - torch.Tensor: Audio wav of shape [B, T]. 
- """ - assert wav.dim() == 3, "Expecting wav to be [B, C, T]" - if sample_rates is not None: - _wav = [] - for i, audio in enumerate(wav): - sr = sample_rates[i] - audio = convert_audio(audio, from_rate=sr, to_rate=self.clap_sample_rate, to_channels=1) - _wav.append(audio) - wav = torch.stack(_wav, dim=0) - wav = wav.mean(dim=1) - return wav - - def _compute_wav_embedding(self, wav: torch.Tensor, length: torch.Tensor, - sample_rates: tp.List[int], reduce_mean: bool = False) -> torch.Tensor: - """Compute audio wave embedding from CLAP model. - - Since CLAP operates on a fixed sequence length audio inputs and we need to process longer audio sequences, - we calculate the wav embeddings on `clap_max_frames` windows with `clap_stride`-second stride and - average the resulting embeddings. - - Args: - wav (torch.Tensor): Audio wav, of shape [B, C, T]. - length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B]. - sample_rates (list[int]): Sample rates for each sample in the batch. - reduce_mean (bool): Whether to get the average tensor. - Returns: - torch.Tensor: Audio embedding of shape [B, F, D], F being the number of chunks, D the dimension. - """ - with torch.no_grad(): - wav = self._preprocess_wav(wav, length, sample_rates) - B, T = wav.shape - if T >= self.clap_max_frames: - wav = wav.unfold(-1, self.clap_max_frames, self.clap_stride) # [B, F, T] - else: - wav = wav.view(-1, 1, T) # [B, F, T] with F=1 - wav = einops.rearrange(wav, 'b f t -> (b f) t') - embed_list = [] - for i in range(0, wav.size(0), self.batch_size): - _wav = wav[i:i+self.batch_size, ...] - _embed = self.clap.get_audio_embedding_from_data(_wav, use_tensor=True) - embed_list.append(_embed) - embed = torch.cat(embed_list, dim=0) - embed = einops.rearrange(embed, '(b f) d -> b f d', b=B) - if reduce_mean: - embed = embed.mean(dim=1, keepdim=True) - return embed # [B, F, D] with F=1 if reduce_mean is True - - def _get_wav_embedding_for_cache(self, path: tp.Union[str, Path], - x: JointEmbedCondition, idx: int) -> torch.Tensor: - """Compute audio wave embedding for the cache. - The embedding is computed on a given audio read from file. - - Args: - path (str or Path): Path to the full audio file. - Returns: - torch.Tensor: Single-item tensor of shape [F, D], F being the number of chunks, D the dimension. - """ - wav, sr = audio_read(path) # [C, T] - wav = wav.unsqueeze(0).to(self.device) # [1, C, T] - wav_len = torch.LongTensor([wav.shape[-1]]).to(self.device) - embed = self._compute_wav_embedding(wav, wav_len, [sr], reduce_mean=False) # [B, F, D] - return embed.squeeze(0) # [F, D] - - def _extract_wav_embedding_chunk(self, full_embed: torch.Tensor, x: JointEmbedCondition, idx: int) -> torch.Tensor: - """Extract the chunk of embedding matching the seek_time and length from the full CLAP audio embedding. - - Args: - full_embed (torch.Tensor): CLAP embedding computed on the full wave, of shape [F, D]. - x (JointEmbedCondition): Joint embedding condition for the full batch. - idx (int): Index considered for the given embedding to extract. - Returns: - torch.Tensor: Wav embedding averaged on sliding window, of shape [1, D]. - """ - sample_rate = x.sample_rate[idx] - seek_time = x.seek_time[idx] - seek_time = 0. 
if seek_time is None else seek_time - clap_stride = int(self.clap_stride / self.clap_sample_rate) * sample_rate - end_seek_time = seek_time + self.clap_max_frames / self.clap_sample_rate - start_offset = int(seek_time * sample_rate // clap_stride) - end_offset = int(end_seek_time * sample_rate // clap_stride) - wav_embed = full_embed[start_offset:end_offset, ...] - wav_embed = wav_embed.mean(dim=0, keepdim=True) - return wav_embed.to(self.device) # [F, D] - - def _get_text_embedding(self, x: JointEmbedCondition) -> torch.Tensor: - """Get CLAP embedding from a batch of text descriptions.""" - no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout - if self.text_cache is not None and no_nullified_cond: - assert all(p is not None for p in x.path), "Cache requires all JointEmbedCondition paths to be provided" - paths = [Path(p) for p in x.path if p is not None] - embed = self.text_cache.get_embed_from_cache(paths, x) - else: - text = [xi if xi is not None else "" for xi in x.text] - embed = self._compute_text_embedding(text) - if self.normalize: - embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1) - return embed - - def _get_wav_embedding(self, x: JointEmbedCondition) -> torch.Tensor: - """Get CLAP embedding from a batch of audio tensors (and corresponding sample rates).""" - no_undefined_paths = all(p is not None for p in x.path) - no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout - if self.wav_cache is not None and no_undefined_paths and no_nullified_cond: - paths = [Path(p) for p in x.path if p is not None] - embed = self.wav_cache.get_embed_from_cache(paths, x) - else: - embed = self._compute_wav_embedding(x.wav, x.length, x.sample_rate, reduce_mean=True) - if self.normalize: - embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1) - return embed - - def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: - # Trying to limit as much as possible sync points when the cache is warm. - no_undefined_paths = all(p is not None for p in x.path) - if self.wav_cache is not None and no_undefined_paths: - assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided" - paths = [Path(p) for p in x.path if p is not None] - self.wav_cache.populate_embed_cache(paths, x) - if self.text_cache is not None and no_undefined_paths: - assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided" - paths = [Path(p) for p in x.path if p is not None] - self.text_cache.populate_embed_cache(paths, x) - return x - - def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Extract shared latent representation from either the wav or the text using CLAP.""" - # decide whether to use text embedding at train time or not - use_text_embed = random.random() < self.text_p - if self.training and not use_text_embed: - embed = self._get_wav_embedding(x) - empty_idx = torch.LongTensor([]) # we assume we always have the audio wav - else: - embed = self._get_text_embedding(x) - empty_idx = torch.LongTensor([i for i, xi in enumerate(x.text) if xi is None or xi == ""]) - return embed, empty_idx - - -def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str) -> ConditioningAttributes: - """Utility function for nullifying an attribute inside an ConditioningAttributes object. - If the condition is of type "wav", then nullify it using `nullify_condition` function. 
-    If the condition is of any other type, set its value to None.
-    Works in-place.
-    """
-    if condition_type not in ['text', 'wav', 'joint_embed']:
-        raise ValueError(
-            "dropout_condition got an unexpected condition type!"
-            f" expected 'text', 'wav' or 'joint_embed' but got '{condition_type}'"
-        )
-
-    if condition not in getattr(sample, condition_type):
-        raise ValueError(
-            "dropout_condition received an unexpected condition!"
-            f" expected wav={sample.wav.keys()} and text={sample.text.keys()}"
-            f" but got '{condition}' of type '{condition_type}'!"
-        )
-
-    if condition_type == 'wav':
-        wav_cond = sample.wav[condition]
-        sample.wav[condition] = nullify_wav(wav_cond)
-    elif condition_type == 'joint_embed':
-        embed = sample.joint_embed[condition]
-        sample.joint_embed[condition] = nullify_joint_embed(embed)
-    else:
-        sample.text[condition] = None
-
-    return sample
-
-
-class DropoutModule(nn.Module):
-    """Base module for all dropout modules."""
-    def __init__(self, seed: int = 1234):
-        super().__init__()
-        self.rng = torch.Generator()
-        self.rng.manual_seed(seed)
-
-
-class AttributeDropout(DropoutModule):
-    """Dropout with a given probability per attribute.
-    This is different from the behavior of ClassifierFreeGuidanceDropout as this allows for attributes
-    to be dropped out separately. For example, "artist" can be dropped while "genre" remains.
-    This is in contrast to ClassifierFreeGuidanceDropout where if "artist" is dropped "genre"
-    must also be dropped.
-
-    Args:
-        p (tp.Dict[str, tp.Dict[str, float]]): A dict mapping each condition type to a dict of
-            attribute dropout probabilities. For example:
-            ...
-            "genre": 0.1,
-            "artist": 0.5,
-            "wav": 0.25,
-            ...
-        active_on_eval (bool, optional): Whether the dropout is active at eval. Defaults to False.
-        seed (int, optional): Random seed.
-    """
-    def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234):
-        super().__init__(seed=seed)
-        self.active_on_eval = active_on_eval
-        # construct a dict that returns the probability from p, defaulting to 0
-        self.p = {}
-        for condition_type, probs in p.items():
-            self.p[condition_type] = defaultdict(lambda: 0, probs)
-
-    def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]:
-        """
-        Args:
-            samples (list[ConditioningAttributes]): List of conditions.
-        Returns:
-            list[ConditioningAttributes]: List of conditions after certain attributes were set to None.
-        """
-        if not self.training and not self.active_on_eval:
-            return samples
-
-        samples = deepcopy(samples)
-        for condition_type, ps in self.p.items():  # for condition types [text, wav]
-            for condition, p in ps.items():  # for attributes of each type (e.g., [artist, genre])
-                if torch.rand(1, generator=self.rng).item() < p:
-                    for sample in samples:
-                        dropout_condition(sample, condition_type, condition)
-        return samples
-
-    def __repr__(self):
-        return f"AttributeDropout({dict(self.p)})"
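A hypothetical configuration contrasting the two dropout modules: with AttributeDropout each attribute falls independently, while ClassifierFreeGuidanceDropout (defined next) nullifies every attribute at once with probability p. Attribute names here are illustrative:

att_dropout = AttributeDropout(p={'text': {'genre': 0.5, 'description': 0.3},
                                  'wav': {'self_wav': 0.25}})
cfg_dropout = ClassifierFreeGuidanceDropout(p=0.1)
# a typical training-time composition (the ordering is a modeling choice, not a requirement):
# samples = cfg_dropout(att_dropout(samples))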
- """ - if not self.training: - return samples - - # decide on which attributes to drop in a batched fashion - drop = torch.rand(1, generator=self.rng).item() < self.p - if not drop: - return samples - - # nullify conditions of all attributes - samples = deepcopy(samples) - for condition_type in ["wav", "text"]: - for sample in samples: - for condition in sample.attributes[condition_type]: - dropout_condition(sample, condition_type, condition) - return samples - - def __repr__(self): - return f"ClassifierFreeGuidanceDropout(p={self.p})" - - -class ConditioningProvider(nn.Module): - """Prepare and provide conditions given all the supported conditioners. - - Args: - conditioners (dict): Dictionary of conditioners. - device (torch.device or str, optional): Device for conditioners and output condition types. - """ - def __init__(self, conditioners: tp.Dict[str, BaseConditioner], device: tp.Union[torch.device, str] = "cpu"): - super().__init__() - self.device = device - self.conditioners = nn.ModuleDict(conditioners) - - @property - def joint_embed_conditions(self): - return [m.attribute for m in self.conditioners.values() if isinstance(m, JointEmbeddingConditioner)] - - @property - def has_joint_embed_conditions(self): - return len(self.joint_embed_conditions) > 0 - - @property - def text_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)] - - @property - def wav_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)] - - @property - def has_wav_condition(self): - return len(self.wav_conditions) > 0 - - def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: - """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. - This should be called before starting any real GPU work to avoid synchronization points. - This will return a dict matching conditioner names to their arbitrary tokenized representations. - - Args: - inputs (list[ConditioningAttributes]): List of ConditioningAttributes objects containing - text and wav conditions. - """ - assert all([isinstance(x, ConditioningAttributes) for x in inputs]), ( - "Got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]", - f" but types were {set([type(x) for x in inputs])}" - ) - - output = {} - text = self._collate_text(inputs) - wavs = self._collate_wavs(inputs) - joint_embeds = self._collate_joint_embeds(inputs) - - assert set(text.keys() | wavs.keys() | joint_embeds.keys()).issubset(set(self.conditioners.keys())), ( - f"Got an unexpected attribute! Expected {self.conditioners.keys()}, ", - f"got {text.keys(), wavs.keys(), joint_embeds.keys()}" - ) - - for attribute, batch in chain(text.items(), wavs.items(), joint_embeds.items()): - output[attribute] = self.conditioners[attribute].tokenize(batch) - return output - - def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: - """Compute pairs of `(embedding, mask)` using the configured conditioners and the tokenized representations. - The output is for example: - { - "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), - "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), - ... - } - - Args: - tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. 
- """ - output = {} - for attribute, inputs in tokenized.items(): - condition, mask = self.conditioners[attribute](inputs) - output[attribute] = (condition, mask) - return output - - def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]: - """Given a list of ConditioningAttributes objects, compile a dictionary where the keys - are the attributes and the values are the aggregated input per attribute. - For example: - Input: - [ - ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...), - ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...), - ] - Output: - { - "genre": ["Rock", "Hip-hop"], - "description": ["A rock song with a guitar solo", "A hip-hop verse"] - } - - Args: - samples (list of ConditioningAttributes): List of ConditioningAttributes samples. - Returns: - dict[str, list[str, optional]]: A dictionary mapping an attribute name to text batch. - """ - out: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list) - texts = [x.text for x in samples] - for text in texts: - for condition in self.text_conditions: - out[condition].append(text[condition]) - return out - - def _collate_wavs(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, WavCondition]: - """Generate a dict where the keys are attributes by which we fetch similar wavs, - and the values are Tensors of wavs according to said attributes. - - *Note*: by the time the samples reach this function, each sample should have some waveform - inside the "wav" attribute. It should be either: - 1. A real waveform - 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset) - 3. A null waveform due to it being dropped in a dropout module (nullified by dropout) - - Args: - samples (list of ConditioningAttributes): List of ConditioningAttributes samples. - Returns: - dict[str, WavCondition]: A dictionary mapping an attribute name to wavs. - """ - wavs = defaultdict(list) - lengths = defaultdict(list) - sample_rates = defaultdict(list) - paths = defaultdict(list) - seek_times = defaultdict(list) - out: tp.Dict[str, WavCondition] = {} - - for sample in samples: - for attribute in self.wav_conditions: - wav, length, sample_rate, path, seek_time = sample.wav[attribute] - assert wav.dim() == 3, f"Got wav with dim={wav.dim()}, but expected 3 [1, C, T]" - assert wav.size(0) == 1, f"Got wav [B, C, T] with shape={wav.shape}, but expected B == 1" - # mono-channel conditioning - wav = wav.mean(1, keepdim=True) # [1, 1, T] - wavs[attribute].append(wav.flatten()) # [T] - lengths[attribute].append(length) - sample_rates[attribute].extend(sample_rate) - paths[attribute].extend(path) - seek_times[attribute].extend(seek_time) - - # stack all wavs to a single tensor - for attribute in self.wav_conditions: - stacked_wav, _ = collate(wavs[attribute], dim=0) - out[attribute] = WavCondition( - stacked_wav.unsqueeze(1), torch.cat(lengths[attribute]), sample_rates[attribute], - paths[attribute], seek_times[attribute]) - - return out - - def _collate_joint_embeds(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, JointEmbedCondition]: - """Generate a dict where the keys are attributes by which we compute joint embeddings, - and the values are Tensors of pre-computed embeddings and the corresponding text attributes. - - Args: - samples (list[ConditioningAttributes]): List of ConditioningAttributes samples. 
- Returns: - A dictionary mapping an attribute name to joint embeddings. - """ - texts = defaultdict(list) - wavs = defaultdict(list) - lengths = defaultdict(list) - sample_rates = defaultdict(list) - paths = defaultdict(list) - seek_times = defaultdict(list) - channels: int = 0 - - out = {} - for sample in samples: - for attribute in self.joint_embed_conditions: - wav, text, length, sample_rate, path, seek_time = sample.joint_embed[attribute] - assert wav.dim() == 3 - if channels == 0: - channels = wav.size(1) - else: - assert channels == wav.size(1), "not all audio has same number of channels in batch" - assert wav.size(0) == 1, "Expecting single-wav batch in the collate method" - wav = einops.rearrange(wav, "b c t -> (b c t)") # [1, C, T] => [C * T] - wavs[attribute].append(wav) - texts[attribute].extend(text) - lengths[attribute].append(length) - sample_rates[attribute].extend(sample_rate) - paths[attribute].extend(path) - seek_times[attribute].extend(seek_time) - - for attribute in self.joint_embed_conditions: - stacked_texts = texts[attribute] - stacked_paths = paths[attribute] - stacked_seek_times = seek_times[attribute] - stacked_wavs = pad_sequence(wavs[attribute]).to(self.device) - stacked_wavs = einops.rearrange(stacked_wavs, "(c t) b -> b c t", c=channels) - stacked_sample_rates = sample_rates[attribute] - stacked_lengths = torch.cat(lengths[attribute]).to(self.device) - assert stacked_lengths.size(0) == stacked_wavs.size(0) - assert len(stacked_sample_rates) == stacked_wavs.size(0) - assert len(stacked_texts) == stacked_wavs.size(0) - out[attribute] = JointEmbedCondition( - text=stacked_texts, wav=stacked_wavs, - length=stacked_lengths, sample_rate=stacked_sample_rates, - path=stacked_paths, seek_time=stacked_seek_times) - - return out - - -class ConditionFuser(StreamingModule): - """Condition fuser handles the logic to combine the different conditions - to the actual model input. - - Args: - fuse2cond (tp.Dict[str, str]): A dictionary that says how to fuse - each condition. For example: - { - "prepend": ["description"], - "sum": ["genre", "bpm"], - "cross": ["description"], - } - cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention. - cross_attention_pos_emb_scale (int): Scale for positional embeddings in cross attention if used. - """ - FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"] - - def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False, - cross_attention_pos_emb_scale: float = 1.0): - super().__init__() - assert all( - [k in self.FUSING_METHODS for k in fuse2cond.keys()] - ), f"Got invalid fuse method, allowed methods: {self.FUSING_METHODS}" - self.cross_attention_pos_emb = cross_attention_pos_emb - self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale - self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond - self.cond2fuse: tp.Dict[str, str] = {} - for fuse_method, conditions in fuse2cond.items(): - for condition in conditions: - self.cond2fuse[condition] = fuse_method - - def forward( - self, - input: torch.Tensor, - conditions: tp.Dict[str, ConditionType] - ) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]: - """Fuse the conditions to the provided model input. - - Args: - input (torch.Tensor): Transformer input. - conditions (dict[str, ConditionType]): Dict of conditions. - Returns: - tuple[torch.Tensor, torch.Tensor]: The first tensor is the transformer input - after the conditions have been fused. 
The second output tensor is the tensor - used for cross-attention or None if no cross attention inputs exist. - """ - B, T, _ = input.shape - - if 'offsets' in self._streaming_state: - first_step = False - offsets = self._streaming_state['offsets'] - else: - first_step = True - offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device) - - assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \ - f"given conditions contain unknown attributes for fuser, " \ - f"expected {self.cond2fuse.keys()}, got {conditions.keys()}" - cross_attention_output = None - for cond_type, (cond, cond_mask) in conditions.items(): - op = self.cond2fuse[cond_type] - if op == 'sum': - input += cond - elif op == 'input_interpolate': - cond = einops.rearrange(cond, "b t d -> b d t") - cond = F.interpolate(cond, size=input.shape[1]) - input += einops.rearrange(cond, "b d t -> b t d") - elif op == 'prepend': - if first_step: - input = torch.cat([cond, input], dim=1) - elif op == 'cross': - if cross_attention_output is not None: - cross_attention_output = torch.cat([cross_attention_output, cond], dim=1) - else: - cross_attention_output = cond - else: - raise ValueError(f"unknown op ({op})") - - if self.cross_attention_pos_emb and cross_attention_output is not None: - positions = torch.arange( - cross_attention_output.shape[1], - device=cross_attention_output.device - ).view(1, -1, 1) - pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1]) - cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return input, cross_attention_output diff --git a/spaces/Hallucinate/demo/ldm/modules/diffusionmodules/__init__.py b/spaces/Hallucinate/demo/ldm/modules/diffusionmodules/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HaloMaster/chinesesummary/fengshen/data/megatron_dataloader/helpers.cpp b/spaces/HaloMaster/chinesesummary/fengshen/data/megatron_dataloader/helpers.cpp deleted file mode 100644 index 31277dd1ce3a449bf962ba5a4d6343e7a9c0b5f9..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/data/megatron_dataloader/helpers.cpp +++ /dev/null @@ -1,794 +0,0 @@ -/* - coding=utf-8 - Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- */
-
-/* Helper methods for fast index mapping builds */
-
-#include <algorithm>
-#include <iostream>
-#include <limits>
-#include <math.h>
-#include <stdexcept>
-#include <pybind11/pybind11.h>
-#include <pybind11/numpy.h>
-#include <random>
-
-namespace py = pybind11;
-using namespace std;
-
-const int32_t LONG_SENTENCE_LEN = 512;
-
-void build_blending_indices(py::array_t<uint8_t> &dataset_index,
-                            py::array_t<int64_t> &dataset_sample_index,
-                            const py::array_t<double> &weights,
-                            const int32_t num_datasets,
-                            const int64_t size, const bool verbose)
-{
-    /* Given multiple datasets and a weighting array, build samples
-       such that it follows those weights.*/
-
-    if (verbose)
-    {
-        std::cout << "> building indices for blendable datasets ..." << std::endl;
-    }
-
-    // Get the pointer access without the checks.
-    auto dataset_index_ptr = dataset_index.mutable_unchecked<1>();
-    auto dataset_sample_index_ptr = dataset_sample_index.mutable_unchecked<1>();
-    auto weights_ptr = weights.unchecked<1>();
-
-    // Initialize buffer for number of samples used for each dataset.
-    int64_t current_samples[num_datasets];
-    for (int64_t i = 0; i < num_datasets; ++i)
-    {
-        current_samples[i] = 0;
-    }
-
-    // For each sample:
-    for (int64_t sample_idx = 0; sample_idx < size; ++sample_idx)
-    {
-
-        // Determine where the max error in sampling is happening.
-        auto sample_idx_double = std::max(static_cast<double>(sample_idx), 1.0);
-        int64_t max_error_index = 0;
-        double max_error = weights_ptr[0] * sample_idx_double -
-                           static_cast<double>(current_samples[0]);
-        for (int64_t dataset_idx = 1; dataset_idx < num_datasets; ++dataset_idx)
-        {
-            double error = weights_ptr[dataset_idx] * sample_idx_double -
-                           static_cast<double>(current_samples[dataset_idx]);
-            if (error > max_error)
-            {
-                max_error = error;
-                max_error_index = dataset_idx;
-            }
-        }
-
-        // Populate the indices.
-        dataset_index_ptr[sample_idx] = static_cast<uint8_t>(max_error_index);
-        dataset_sample_index_ptr[sample_idx] = current_samples[max_error_index];
-
-        // Update the total samples.
-        current_samples[max_error_index] += 1;
-    }
-
-    // print info
-    if (verbose)
-    {
-        std::cout << " > sample ratios:" << std::endl;
-        for (int64_t dataset_idx = 0; dataset_idx < num_datasets; ++dataset_idx)
-        {
-            auto ratio = static_cast<double>(current_samples[dataset_idx]) /
-                         static_cast<double>(size);
-            std::cout << "   dataset " << dataset_idx << ", input: " << weights_ptr[dataset_idx] << ", achieved: " << ratio << std::endl;
-        }
-    }
-}
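The loop above is a greedy quota scheduler: each new global sample is assigned to the dataset whose realized share lags its target weight the most. The same logic in plain Python, as a reference sketch of what the pybind11 routine computes (not part of the original extension):

import numpy as np

def build_blending_indices_sketch(weights, size):
    weights = np.asarray(weights, dtype=np.float64)
    dataset_index = np.zeros(size, dtype=np.uint8)
    dataset_sample_index = np.zeros(size, dtype=np.int64)
    current_samples = np.zeros(len(weights), dtype=np.int64)
    for sample_idx in range(size):
        # error = how far each dataset lags its target share at this step
        errors = weights * max(float(sample_idx), 1.0) - current_samples
        winner = int(np.argmax(errors))  # ties resolve to the first dataset, as in the C++
        dataset_index[sample_idx] = winner
        dataset_sample_index[sample_idx] = current_samples[winner]
        current_samples[winner] += 1
    return dataset_index, dataset_sample_index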
-
-py::array build_sample_idx(const py::array_t<int32_t> &sizes_,
-                           const py::array_t<int32_t> &doc_idx_,
-                           const int32_t seq_length,
-                           const int32_t num_epochs,
-                           const int64_t tokens_per_epoch)
-{
-    /* Sample index (sample_idx) is used for gpt2 like dataset for which
-       the documents are flattened and the samples are built based on this
-       1-D flatten array. It is a 2D array with sizes [number-of-samples + 1, 2]
-       where [..., 0] contains the index into `doc_idx` and [..., 1] is the
-       starting offset in that document.*/
-
-    // Consistency checks.
-    assert(seq_length > 1);
-    assert(num_epochs > 0);
-    assert(tokens_per_epoch > 1);
-
-    // Remove bound checks.
-    auto sizes = sizes_.unchecked<1>();
-    auto doc_idx = doc_idx_.unchecked<1>();
-
-    // Mapping and its length (1D).
-    int64_t num_samples = (num_epochs * tokens_per_epoch - 1) / seq_length;
-    int32_t *sample_idx = new int32_t[2 * (num_samples + 1)];
-
-    cout << "    using:" << endl
-         << std::flush;
-    cout << "     number of documents:     " << doc_idx_.shape(0) / num_epochs << endl
-         << std::flush;
-    cout << "     number of epochs:        " << num_epochs << endl
-         << std::flush;
-    cout << "     sequence length:         " << seq_length << endl
-         << std::flush;
-    cout << "     total number of samples: " << num_samples << endl
-         << std::flush;
-
-    // Index into sample_idx.
-    int64_t sample_index = 0;
-    // Index into doc_idx.
-    int64_t doc_idx_index = 0;
-    // Beginning offset for each document.
-    int32_t doc_offset = 0;
-    // Start with first document and no offset.
-    sample_idx[2 * sample_index] = doc_idx_index;
-    sample_idx[2 * sample_index + 1] = doc_offset;
-    ++sample_index;
-
-    while (sample_index <= num_samples)
-    {
-        // Start with a fresh sequence.
-        int32_t remaining_seq_length = seq_length + 1;
-        while (remaining_seq_length != 0)
-        {
-            // Get the document length.
-            auto doc_id = doc_idx[doc_idx_index];
-            auto doc_length = sizes[doc_id] - doc_offset;
-            // And add it to the current sequence.
-            remaining_seq_length -= doc_length;
-            // If we have more than a full sequence, adjust offset and set
-            // remaining length to zero so we return from the while loop.
-            // Note that -1 here is for the same reason we have -1 in
-            // `_num_epochs` calculations.
-            if (remaining_seq_length <= 0)
-            {
-                doc_offset += (remaining_seq_length + doc_length - 1);
-                remaining_seq_length = 0;
-            }
-            else
-            {
-                // Otherwise, start from the beginning of the next document.
-                ++doc_idx_index;
-                doc_offset = 0;
-            }
-        }
-        // Record the sequence.
-        sample_idx[2 * sample_index] = doc_idx_index;
-        sample_idx[2 * sample_index + 1] = doc_offset;
-        ++sample_index;
-    }
-
-    // Method to deallocate memory.
-    py::capsule free_when_done(sample_idx, [](void *mem_)
-                               {
-                                   int32_t *mem = reinterpret_cast<int32_t *>(mem_);
-                                   delete[] mem;
-                               });
-
-    // Return the numpy array.
-    const auto byte_size = sizeof(int32_t);
-    return py::array(std::vector<int64_t>{num_samples + 1, 2}, // shape
-                     {2 * byte_size, byte_size},               // C-style contiguous strides
-                     sample_idx,                               // the data pointer
-                     free_when_done);                          // numpy array references
-}
-
-inline int32_t get_target_sample_len(const int32_t short_seq_ratio,
-                                     const int32_t max_length,
-                                     std::mt19937 &rand32_gen)
-{
-    /* Training sample length. */
-    if (short_seq_ratio == 0)
-    {
-        return max_length;
-    }
-    const auto random_number = rand32_gen();
-    if ((random_number % short_seq_ratio) == 0)
-    {
-        return 2 + random_number % (max_length - 1);
-    }
-    return max_length;
-}
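For reference, the sample-index construction above in plain Python (the C++ exists purely for speed; this sketch mirrors the same walk over the flattened documents):

import numpy as np

def build_sample_idx_sketch(sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch):
    num_samples = (num_epochs * tokens_per_epoch - 1) // seq_length
    sample_idx = np.zeros((num_samples + 1, 2), dtype=np.int32)
    doc_idx_index, doc_offset = 0, 0
    sample_idx[0] = (doc_idx_index, doc_offset)  # first sample starts at doc 0, offset 0
    for row in range(1, num_samples + 1):
        remaining = seq_length + 1  # +1 token for the shifted-label target
        while remaining != 0:
            doc_length = sizes[doc_idx[doc_idx_index]] - doc_offset
            remaining -= doc_length
            if remaining <= 0:
                # sequence ends inside this document: advance the offset
                doc_offset += remaining + doc_length - 1
                remaining = 0
            else:
                # consume the rest of this document and move to the next one
                doc_idx_index += 1
                doc_offset = 0
        sample_idx[row] = (doc_idx_index, doc_offset)
    return sample_idx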
-
-template <typename DocIdx>
-py::array build_mapping_impl(const py::array_t<int64_t> &docs_,
-                             const py::array_t<int32_t> &sizes_,
-                             const int32_t num_epochs,
-                             const uint64_t max_num_samples,
-                             const int32_t max_seq_length,
-                             const double short_seq_prob,
-                             const int32_t seed,
-                             const bool verbose,
-                             const int32_t min_num_sent)
-{
-    /* Build a mapping of (start-index, end-index, sequence-length) where
-       start and end index are the indices of the sentences in the sample
-       and sequence-length is the target sequence length.
-    */
-
-    // Consistency checks.
-    assert(num_epochs > 0);
-    assert(max_seq_length > 1);
-    assert(short_seq_prob >= 0.0);
-    assert(short_seq_prob <= 1.0);
-    assert(seed > 0);
-
-    // Remove bound checks.
-    auto docs = docs_.unchecked<1>();
-    auto sizes = sizes_.unchecked<1>();
-
-    // For efficiency, convert probability to ratio. Note: rand() generates int.
-    int32_t short_seq_ratio = 0;
-    if (short_seq_prob > 0)
-    {
-        short_seq_ratio = static_cast<int32_t>(round(1.0 / short_seq_prob));
-    }
-
-    if (verbose)
-    {
-        const auto sent_start_index = docs[0];
-        const auto sent_end_index = docs[docs_.shape(0) - 1];
-        const auto num_sentences = sent_end_index - sent_start_index;
-        cout << "    using:" << endl
-             << std::flush;
-        cout << "     number of documents:           " << docs_.shape(0) - 1 << endl
-             << std::flush;
-        cout << "     sentences range:               [" << sent_start_index << ", " << sent_end_index << ")" << endl
-             << std::flush;
-        cout << "     total number of sentences:     " << num_sentences << endl
-             << std::flush;
-        cout << "     number of epochs:              " << num_epochs << endl
-             << std::flush;
-        cout << "     maximum number of samples:     " << max_num_samples << endl
-             << std::flush;
-        cout << "     maximum sequence length:       " << max_seq_length << endl
-             << std::flush;
-        cout << "     short sequence probability:    " << short_seq_prob << endl
-             << std::flush;
-        cout << "     short sequence ratio (1/prob): " << short_seq_ratio << endl
-             << std::flush;
-        cout << "     seed:                          " << seed << endl
-             << std::flush;
-    }
-
-    // Mapping and its length (1D).
-    int64_t num_samples = -1;
-    DocIdx *maps = NULL;
-
-    // Perform two iterations, in the first iteration get the size
-    // and allocate memory and in the second iteration populate the map.
-    bool second = false;
-    for (int32_t iteration = 0; iteration < 2; ++iteration)
-    {
-
-        // Set the seed so both iterations produce the same results.
-        std::mt19937 rand32_gen(seed);
-
-        // Set the flag on second iteration.
-        second = (iteration == 1);
-
-        // Counters:
-        uint64_t empty_docs = 0;
-        uint64_t one_sent_docs = 0;
-        uint64_t long_sent_docs = 0;
-
-        // Current map index.
-        uint64_t map_index = 0;
-
-        // For each epoch:
-        for (int32_t epoch = 0; epoch < num_epochs; ++epoch)
-        {
-            if (map_index >= max_num_samples)
-            {
-                if (verbose && (!second))
-                {
-                    cout << "    reached " << max_num_samples << " samples after "
-                         << epoch << " epochs ..." << endl
-                         << std::flush;
-                }
-                break;
-            }
-            // For each document:
-            for (int32_t doc = 0; doc < (docs.shape(0) - 1); ++doc)
-            {
-
-                // Document sentences are in [sent_index_first, sent_index_last)
-                const auto sent_index_first = docs[doc];
-                const auto sent_index_last = docs[doc + 1];
-
-                // At the beginning of the document previous index is the
-                // start index.
-                auto prev_start_index = sent_index_first;
-
-                // Remaining documents.
-                auto num_remain_sent = sent_index_last - sent_index_first;
-
-                // Some bookkeeping
-                if ((epoch == 0) && (!second))
-                {
-                    if (num_remain_sent == 0)
-                    {
-                        ++empty_docs;
-                    }
-                    if (num_remain_sent == 1)
-                    {
-                        ++one_sent_docs;
-                    }
-                }
-
-                // Detect documents with long sentences.
-                bool contains_long_sentence = false;
-                if (num_remain_sent > 1)
-                {
-                    for (auto sent_index = sent_index_first;
-                         sent_index < sent_index_last; ++sent_index)
-                    {
-                        if (sizes[sent_index] > LONG_SENTENCE_LEN)
-                        {
-                            if ((epoch == 0) && (!second))
-                            {
-                                ++long_sent_docs;
-                            }
-                            contains_long_sentence = true;
-                            break;
-                        }
-                    }
-                }
-
-                // If we have more than two sentences.
-                if ((num_remain_sent >= min_num_sent) && (!contains_long_sentence))
-                {
-
-                    // Set values.
-                    auto seq_len = int32_t{0};
-                    auto num_sent = int32_t{0};
-                    auto target_seq_len = get_target_sample_len(short_seq_ratio,
-                                                                max_seq_length,
-                                                                rand32_gen);
-
-                    // Loop through sentences.
-                    for (auto sent_index = sent_index_first;
-                         sent_index < sent_index_last; ++sent_index)
-                    {
-
-                        // Add the size and number of sentences.
-                        seq_len += sizes[sent_index];
-                        ++num_sent;
-                        --num_remain_sent;
-
-                        // If we have reached the target length.
-                        // and if not only one sentence is left in the document.
-                        // and if we have at least two sentences.
-                        // and if we have reached end of the document.
-                        if (((seq_len >= target_seq_len) &&
-                             (num_remain_sent > 1) &&
-                             (num_sent >= min_num_sent)) ||
-                            (num_remain_sent == 0))
-                        {
-
-                            // Check for overflow.
-                            if ((3 * map_index + 2) >
-                                std::numeric_limits<int64_t>::max())
-                            {
-                                cout << "number of samples exceeded maximum "
-                                     << "allowed by type int64: "
-                                     << std::numeric_limits<int64_t>::max()
-                                     << endl;
-                                throw std::overflow_error("Number of samples");
-                            }
-
-                            // Populate the map.
-                            if (second)
-                            {
-                                const auto map_index_0 = 3 * map_index;
-                                maps[map_index_0] = static_cast<DocIdx>(prev_start_index);
-                                maps[map_index_0 + 1] = static_cast<DocIdx>(sent_index + 1);
-                                maps[map_index_0 + 2] = static_cast<DocIdx>(target_seq_len);
-                            }
-
-                            // Update indices / counters.
-                            ++map_index;
-                            prev_start_index = sent_index + 1;
-                            target_seq_len = get_target_sample_len(short_seq_ratio,
-                                                                   max_seq_length,
-                                                                   rand32_gen);
-                            seq_len = 0;
-                            num_sent = 0;
-                        }
-
-                    } // for (auto sent_index=sent_index_first; ...
-                } // if (num_remain_sent > 1) {
-            } // for (int doc=0; doc < num_docs; ++doc) {
-        } // for (int epoch=0; epoch < num_epochs; ++epoch) {
-
-        if (!second)
-        {
-            if (verbose)
-            {
-                cout << "   number of empty documents: " << empty_docs << endl
-                     << std::flush;
-                cout << "   number of documents with one sentence: " << one_sent_docs << endl
-                     << std::flush;
-                cout << "   number of documents with long sentences: " << long_sent_docs << endl
-                     << std::flush;
-                cout << "   will create mapping for " << map_index << " samples" << endl
-                     << std::flush;
-            }
-            assert(maps == NULL);
-            assert(num_samples < 0);
-            maps = new DocIdx[3 * map_index];
-            num_samples = static_cast<int64_t>(map_index);
-        }
-
-    } // for (int iteration=0; iteration < 2; ++iteration) {
-
-    // Shuffle.
-    // We need a 64 bit random number generator as we might have more
-    // than 2 billion samples.
-    std::mt19937_64 rand64_gen(seed + 1);
-    for (auto i = (num_samples - 1); i > 0; --i)
-    {
-        const auto j = static_cast<int64_t>(rand64_gen() % (i + 1));
-        const auto i0 = 3 * i;
-        const auto j0 = 3 * j;
-        // Swap values.
-        swap(maps[i0], maps[j0]);
-        swap(maps[i0 + 1], maps[j0 + 1]);
-        swap(maps[i0 + 2], maps[j0 + 2]);
-    }
-
-    // Method to deallocate memory.
-    py::capsule free_when_done(maps, [](void *mem_)
-                               {
-                                   DocIdx *mem = reinterpret_cast<DocIdx *>(mem_);
-                                   delete[] mem;
-                               });
-
-    // Return the numpy array.
-    const auto byte_size = sizeof(DocIdx);
-    return py::array(std::vector<int64_t>{num_samples, 3}, // shape
-                     {3 * byte_size, byte_size},           // C-style contiguous strides
-                     maps,                                 // the data pointer
-                     free_when_done);                      // numpy array references
-}
-
-py::array build_mapping(const py::array_t<int64_t> &docs_,
-                        const py::array_t<int32_t> &sizes_,
-                        const int num_epochs,
-                        const uint64_t max_num_samples,
-                        const int max_seq_length,
-                        const double short_seq_prob,
-                        const int seed,
-                        const bool verbose,
-                        const int32_t min_num_sent)
-{
-
-    if (sizes_.size() > std::numeric_limits<uint32_t>::max())
-    {
-        if (verbose)
-        {
-            cout << "    using uint64 for data mapping..." << endl
-                 << std::flush;
-        }
-        return build_mapping_impl<uint64_t>(docs_, sizes_, num_epochs,
-                                            max_num_samples, max_seq_length,
-                                            short_seq_prob, seed, verbose,
-                                            min_num_sent);
-    }
-        }
-        return build_mapping_impl<uint32_t>(docs_, sizes_, num_epochs,
-                                            max_num_samples, max_seq_length,
-                                            short_seq_prob, seed, verbose,
-                                            min_num_sent);
-    }
-}
-
-template <typename DocIdx>
-py::array build_blocks_mapping_impl(const py::array_t<int64_t> &docs_,
-                                    const py::array_t<int32_t> &sizes_,
-                                    const py::array_t<int32_t> &titles_sizes_,
-                                    const int32_t num_epochs,
-                                    const uint64_t max_num_samples,
-                                    const int32_t max_seq_length,
-                                    const int32_t seed,
-                                    const bool verbose,
-                                    const bool use_one_sent_blocks)
-{
-    /* Build a mapping of (start-index, end-index, sequence-length) where
-       start and end index are the indices of the sentences in the sample
-       and sequence-length is the target sequence length.
-    */
-
-    // Consistency checks.
-    assert(num_epochs > 0);
-    assert(max_seq_length > 1);
-    assert(seed > 0);
-
-    // Remove bound checks.
-    auto docs = docs_.unchecked<1>();
-    auto sizes = sizes_.unchecked<1>();
-    auto titles_sizes = titles_sizes_.unchecked<1>();
-
-    if (verbose)
-    {
-        const auto sent_start_index = docs[0];
-        const auto sent_end_index = docs[docs_.shape(0) - 1];
-        const auto num_sentences = sent_end_index - sent_start_index;
-        cout << " using:" << endl << std::flush;
-        cout << " number of documents: " << docs_.shape(0) - 1 << endl << std::flush;
-        cout << " sentences range: [" << sent_start_index << ", " << sent_end_index << ")" << endl << std::flush;
-        cout << " total number of sentences: " << num_sentences << endl << std::flush;
-        cout << " number of epochs: " << num_epochs << endl << std::flush;
-        cout << " maximum number of samples: " << max_num_samples << endl << std::flush;
-        cout << " maximum sequence length: " << max_seq_length << endl << std::flush;
-        cout << " seed: " << seed << endl << std::flush;
-    }
-
-    // Mapping and its length (1D).
-    int64_t num_samples = -1;
-    DocIdx *maps = NULL;
-
-    // Acceptable number of sentences per block.
-    int min_num_sent = 2;
-    if (use_one_sent_blocks)
-    {
-        min_num_sent = 1;
-    }
-
-    // Perform two iterations, in the first iteration get the size
-    // and allocate memory and in the second iteration populate the map.
-    bool second = false;
-    for (int32_t iteration = 0; iteration < 2; ++iteration)
-    {
-
-        // Set the flag on second iteration.
-        second = (iteration == 1);
-
-        // Current map index.
-        uint64_t map_index = 0;
-
-        uint64_t empty_docs = 0;
-        uint64_t one_sent_docs = 0;
-        uint64_t long_sent_docs = 0;
-        // For each epoch:
-        for (int32_t epoch = 0; epoch < num_epochs; ++epoch)
-        {
-            // assign every block a unique id
-            int32_t block_id = 0;
-
-            if (map_index >= max_num_samples)
-            {
-                if (verbose && (!second))
-                {
-                    cout << " reached " << max_num_samples << " samples after "
-                         << epoch << " epochs ..." << endl << std::flush;
-                }
-                break;
-            }
-            // For each document:
-            for (int32_t doc = 0; doc < (docs.shape(0) - 1); ++doc)
-            {
-
-                // Document sentences are in [sent_index_first, sent_index_last)
-                const auto sent_index_first = docs[doc];
-                const auto sent_index_last = docs[doc + 1];
-                const auto target_seq_len = max_seq_length - titles_sizes[doc];
-
-                // At the beginning of the document previous index is the
-                // start index.
-                auto prev_start_index = sent_index_first;
-
-                // Remaining sentences.
-                auto num_remain_sent = sent_index_last - sent_index_first;
-
-                // Some bookkeeping
-                if ((epoch == 0) && (!second))
-                {
-                    if (num_remain_sent == 0)
-                    {
-                        ++empty_docs;
-                    }
-                    if (num_remain_sent == 1)
-                    {
-                        ++one_sent_docs;
-                    }
-                }
-                // Detect documents with long sentences.
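-                // A document is dropped entirely if any of its sentences
-                // exceeds LONG_SENTENCE_LEN, since such a sentence could
-                // never be packed into a block of the target length.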
-                bool contains_long_sentence = false;
-                if (num_remain_sent >= min_num_sent)
-                {
-                    for (auto sent_index = sent_index_first;
-                         sent_index < sent_index_last; ++sent_index)
-                    {
-                        if (sizes[sent_index] > LONG_SENTENCE_LEN)
-                        {
-                            if ((epoch == 0) && (!second))
-                            {
-                                ++long_sent_docs;
-                            }
-                            contains_long_sentence = true;
-                            break;
-                        }
-                    }
-                }
-                // If we have enough sentences and no long sentences.
-                if ((num_remain_sent >= min_num_sent) && (!contains_long_sentence))
-                {
-
-                    // Set values.
-                    auto seq_len = int32_t{0};
-                    auto num_sent = int32_t{0};
-
-                    // Loop through sentences.
-                    for (auto sent_index = sent_index_first;
-                         sent_index < sent_index_last; ++sent_index)
-                    {
-
-                        // Add the size and number of sentences.
-                        seq_len += sizes[sent_index];
-                        ++num_sent;
-                        --num_remain_sent;
-
-                        // If we have reached the target length,
-                        // and an acceptable number of sentences is left,
-                        // and we have at least the minimum number of sentences,
-                        // or if we have reached the end of the document.
-                        if (((seq_len >= target_seq_len) &&
-                             (num_remain_sent >= min_num_sent) &&
-                             (num_sent >= min_num_sent)) ||
-                            (num_remain_sent == 0))
-                        {
-
-                            // Populate the map.
-                            if (second)
-                            {
-                                const auto map_index_0 = 4 * map_index;
-                                // Each sample has 4 items: the starting sentence index, ending sentence index,
-                                // the index of the document from which the block comes (used for fetching titles)
-                                // and the unique id of the block (used for creating block indexes)
-
-                                maps[map_index_0] = static_cast<DocIdx>(prev_start_index);
-                                maps[map_index_0 + 1] = static_cast<DocIdx>(sent_index + 1);
-                                maps[map_index_0 + 2] = static_cast<DocIdx>(doc);
-                                maps[map_index_0 + 3] = static_cast<DocIdx>(block_id);
-                            }
-
-                            // Update indices / counters.
-                            ++map_index;
-                            ++block_id;
-                            prev_start_index = sent_index + 1;
-                            seq_len = 0;
-                            num_sent = 0;
-                        }
-                    } // for (auto sent_index=sent_index_first; ...
-                } // if (num_remain_sent >= min_num_sent) {
-            } // for (int doc=0; doc < num_docs; ++doc) {
-        } // for (int epoch=0; epoch < num_epochs; ++epoch) {
-
-        if (!second)
-        {
-            if (verbose)
-            {
-                cout << " number of empty documents: " << empty_docs << endl << std::flush;
-                cout << " number of documents with one sentence: " << one_sent_docs << endl << std::flush;
-                cout << " number of documents with long sentences: " << long_sent_docs << endl << std::flush;
-                cout << " will create mapping for " << map_index << " samples" << endl << std::flush;
-            }
-            assert(maps == NULL);
-            assert(num_samples < 0);
-            maps = new DocIdx[4 * map_index];
-            num_samples = static_cast<int64_t>(map_index);
-        }
-
-    } // for (int iteration=0; iteration < 2; ++iteration) {
-
-    // Shuffle.
-    // We need a 64 bit random number generator as we might have more
-    // than 2 billion samples.
-    std::mt19937_64 rand64_gen(seed + 1);
-    for (auto i = (num_samples - 1); i > 0; --i)
-    {
-        const auto j = static_cast<int64_t>(rand64_gen() % (i + 1));
-        const auto i0 = 4 * i;
-        const auto j0 = 4 * j;
-        // Swap values.
-        swap(maps[i0], maps[j0]);
-        swap(maps[i0 + 1], maps[j0 + 1]);
-        swap(maps[i0 + 2], maps[j0 + 2]);
-        swap(maps[i0 + 3], maps[j0 + 3]);
-    }
-
-    // Method to deallocate memory.
-    py::capsule free_when_done(maps, [](void *mem_)
-    {
-        DocIdx *mem = reinterpret_cast<DocIdx *>(mem_);
-        delete[] mem;
-    });
-
-    // Return the numpy array.
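-    // The buffer is handed to numpy without a copy; the capsule above is the
-    // array's base object, so the allocation is freed only once the array is
-    // garbage collected on the Python side.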
-    const auto byte_size = sizeof(DocIdx);
-    return py::array(std::vector<int64_t>{num_samples, 4}, // shape
-                     {4 * byte_size, byte_size},           // C-style contiguous strides
-                     maps,                                 // the data pointer
-                     free_when_done);                      // numpy array references
-}
-
-py::array build_blocks_mapping(const py::array_t<int64_t> &docs_,
-                               const py::array_t<int32_t> &sizes_,
-                               const py::array_t<int32_t> &titles_sizes_,
-                               const int num_epochs,
-                               const uint64_t max_num_samples,
-                               const int max_seq_length,
-                               const int seed,
-                               const bool verbose,
-                               const bool use_one_sent_blocks)
-{
-
-    if (sizes_.size() > std::numeric_limits<uint32_t>::max())
-    {
-        if (verbose)
-        {
-            cout << " using uint64 for data mapping..." << endl << std::flush;
-        }
-        return build_blocks_mapping_impl<uint64_t>(docs_, sizes_, titles_sizes_,
-                                                   num_epochs, max_num_samples, max_seq_length, seed, verbose, use_one_sent_blocks);
-    }
-    else
-    {
-        if (verbose)
-        {
-            cout << " using uint32 for data mapping..." << endl << std::flush;
-        }
-        return build_blocks_mapping_impl<uint32_t>(docs_, sizes_, titles_sizes_,
-                                                   num_epochs, max_num_samples, max_seq_length, seed, verbose, use_one_sent_blocks);
-    }
-}
-
-PYBIND11_MODULE(helpers, m)
-{
-    m.def("build_mapping", &build_mapping);
-    m.def("build_blocks_mapping", &build_blocks_mapping);
-    m.def("build_sample_idx", &build_sample_idx);
-    m.def("build_blending_indices", &build_blending_indices);
-}
diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/fs_zen2_large_tnews.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/fs_zen2_large_tnews.sh
deleted file mode 100644
index ec081cd3191f951c3815af423329540a219b0114..0000000000000000000000000000000000000000
--- a/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/fs_zen2_large_tnews.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=zen2_large_tnews # create a short name for your job
-#SBATCH --nodes=1 # node count
-#SBATCH --ntasks=1 # total number of tasks across all nodes
-#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks)
-#SBATCH --gres=gpu:1 # number of gpus per node
-#SBATCH --mail-type=ALL # send email when the job begins, ends, or fails
-#SBATCH -o %x-%j.log # output and error file name (%x=job name, %j=job id)
-
-
-# export CUDA_VISIBLE_DEVICES='2'
-export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions
-
-MODEL_NAME=zen2_large
-
-TASK=tnews
-
-ZERO_STAGE=1
-STRATEGY=deepspeed_stage_${ZERO_STAGE}
-
-ROOT_DIR=/cognitive_comp/ganruyi/experiments/classification_finetune/${MODEL_NAME}_${TASK}
-if [ ! -d ${ROOT_DIR} ];then
-    mkdir -p ${ROOT_DIR}
-    echo ${ROOT_DIR} created!!!!!!!!!!!!!!
-else
-    echo ${ROOT_DIR} exist!!!!!!!!!!!!!!!
-fi - -DATA_DIR=/cognitive_comp/yangping/data/ChineseCLUE_DATA/${TASK}_public/ -PRETRAINED_MODEL_PATH=IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese - -CHECKPOINT_PATH=${ROOT_DIR}/ckpt/ -OUTPUT_PATH=${ROOT_DIR}/predict.json - -DATA_ARGS="\ - --data_dir $DATA_DIR \ - --train_data train.json \ - --valid_data dev.json \ - --test_data test1.1.json \ - --train_batchsize 32 \ - --valid_batchsize 16 \ - --max_seq_length 128 \ - --texta_name sentence \ - --label_name label \ - --id_name id \ - --task_name tnews \ - " - -MODEL_ARGS="\ - --learning_rate 2e-5 \ - --weight_decay 0.01 \ - --warmup_ratio 0.01 \ - --num_labels 15 \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor val_acc \ - --save_top_k 3 \ - --mode max \ - --every_n_train_steps 400 \ - --save_weights_only True \ - --dirpath $CHECKPOINT_PATH \ - --filename model-{epoch:02d}-{val_acc:.4f} \ - " - -TRAINER_ARGS="\ - --max_epochs 10 \ - --gpus 1 \ - --check_val_every_n_epoch 1 \ - --val_check_interval 400 \ - --default_root_dir $ROOT_DIR \ - " - - -options=" \ - --pretrained_model_path $PRETRAINED_MODEL_PATH \ - --vocab_file $PRETRAINED_MODEL_PATH/vocab.txt \ - --do_lower_case \ - --output_save_path $OUTPUT_PATH \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ -" -SCRIPT_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/zen2_finetune/fengshen_sequence_level_ft_task.py -/home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - -# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -# python3 $SCRIPT_PATH $options -# source activate base -# singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options -# /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - diff --git a/spaces/Hanyin/anime-remove-background/README.md b/spaces/Hanyin/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/Hanyin/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/quantize_with_kmeans.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/quantize_with_kmeans.py deleted file mode 100644 index 2c87445d810cd790f887d1a135287a334cbdf223..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/quantize_with_kmeans.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
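-
-# Pipeline: load (or extract) acoustic features for every utterance in the
-# manifest, quantize each feature sequence with the pre-trained k-means model,
-# and write one "<utterance>|<unit ids>" line per utterance.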
-
-import argparse
-import logging
-import os
-
-import numpy as np
-
-import joblib
-from examples.textless_nlp.gslm.speech2unit.clustering.utils import (
-    get_audio_files,
-)
-from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
-    get_features,
-)
-
-
-def get_logger():
-    log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
-    logging.basicConfig(format=log_format, level=logging.INFO)
-    logger = logging.getLogger(__name__)
-    return logger
-
-
-def get_parser():
-    parser = argparse.ArgumentParser(
-        description="Quantize using K-means clustering over acoustic features."
-    )
-    parser.add_argument(
-        "--feature_type",
-        type=str,
-        choices=["logmel", "hubert", "w2v2", "cpc"],
-        default=None,
-        required=True,
-        help="Acoustic feature type",
-    )
-    parser.add_argument(
-        "--acoustic_model_path",
-        type=str,
-        help="Pretrained acoustic model checkpoint"
-    )
-    parser.add_argument(
-        "--layer",
-        type=int,
-        help="The layer of the pretrained model to extract features from",
-        default=-1,
-    )
-    parser.add_argument(
-        "--kmeans_model_path",
-        type=str,
-        required=True,
-        help="K-means model file path to use for inference",
-    )
-    parser.add_argument(
-        "--features_path",
-        type=str,
-        default=None,
-        help="Features file path. You don't need to enter acoustic model details if you have dumped features",
-    )
-    parser.add_argument(
-        "--manifest_path",
-        type=str,
-        default=None,
-        help="Manifest file containing the root dir and file names",
-    )
-    parser.add_argument(
-        "--out_quantized_file_path",
-        required=True,
-        type=str,
-        help="File path of quantized output.",
-    )
-    parser.add_argument(
-        "--extension", type=str, default=".flac", help="Audio file extension"
-    )
-    return parser
-
-
-def main(args, logger):
-    # Feature extraction
-    if args.features_path is not None:
-        logger.info(f"Loading acoustic features from {args.features_path}...")
-        features_batch = np.load(args.features_path)
-    else:
-        logger.info(f"Extracting {args.feature_type} acoustic features...")
-        features_batch = get_features(
-            feature_type=args.feature_type,
-            checkpoint_path=args.acoustic_model_path,
-            layer=args.layer,
-            manifest_path=args.manifest_path,
-            sample_pct=1.0,
-            flatten=False,
-        )
-        logger.info(
-            f"Features extracted for {len(features_batch)} utterances.\n"
-        )
-        logger.info(
-            f"Dimensionality of representation = {features_batch[0].shape[1]}"
-        )
-
-    # K-means model
-    logger.info(f"Loading K-means model from {args.kmeans_model_path} ...")
-    kmeans_model = joblib.load(open(args.kmeans_model_path, "rb"))
-    kmeans_model.verbose = False
-
-    _, fnames, _ = get_audio_files(args.manifest_path)
-
-    os.makedirs(os.path.dirname(args.out_quantized_file_path), exist_ok=True)
-    print(f"Writing quantized predictions to {args.out_quantized_file_path}")
-    with open(args.out_quantized_file_path, "w") as fout:
-        for i, feats in enumerate(features_batch):
-            pred = kmeans_model.predict(feats)
-            pred_str = " ".join(str(p) for p in pred)
-            # Strip the extension as a suffix; str.rstrip() removes characters
-            # from a set and can eat trailing characters of the name itself.
-            base_fname = os.path.basename(fnames[i])
-            if base_fname.endswith(args.extension):
-                base_fname = base_fname[: -len(args.extension)]
-            fout.write(f"{base_fname}|{pred_str}\n")
-
-
-if __name__ == "__main__":
-    parser = get_parser()
-    args = parser.parse_args()
-    logger = get_logger()
-    logger.info(args)
-    main(args, logger)
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/numel_dataset.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/numel_dataset.py
deleted file mode 100644
index ac86dfd2f1d89055de909656d61d6aca85523f00..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/numel_dataset.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-
-from . import BaseWrapperDataset
-
-
-class NumelDataset(BaseWrapperDataset):
-    def __init__(self, dataset, reduce=False):
-        super().__init__(dataset)
-        self.reduce = reduce
-
-    def __getitem__(self, index):
-        item = self.dataset[index]
-        if torch.is_tensor(item):
-            return torch.numel(item)
-        else:
-            return np.size(item)
-
-    def __len__(self):
-        return len(self.dataset)
-
-    def collater(self, samples):
-        if self.reduce:
-            return sum(samples)
-        else:
-            return torch.tensor(samples)
diff --git a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/utils/inference/tts.py b/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/utils/inference/tts.py
deleted file mode 100644
index dc485ec44dbf34ddbb69c15ad524c0fab189c3c5..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/utils/inference/tts.py
+++ /dev/null
@@ -1,167 +0,0 @@
-from __future__ import absolute_import, division, print_function, unicode_literals
-from typing import Tuple
-import sys
-import argparse
-from argparse import ArgumentParser
-
-import torch
-import numpy as np
-import os
-import json
-
-sys.path.append(os.path.join(os.path.dirname(__file__), "../../src/glow_tts"))
-
-from scipy.io.wavfile import write
-from hifi.env import AttrDict
-from hifi.models import Generator
-
-
-from text import text_to_sequence
-import commons
-import models
-import utils
-
-
-def check_directory(dir):
-    if not os.path.exists(dir):
-        sys.exit("Error: {} directory does not exist".format(dir))
-
-
-class TextToMel:
-    def __init__(self, glow_model_dir, device="cuda"):
-        self.glow_model_dir = glow_model_dir
-        check_directory(self.glow_model_dir)
-        self.device = device
-        self.hps, self.glow_tts_model = self.load_glow_tts()
-
-    def load_glow_tts(self):
-        hps = utils.get_hparams_from_dir(self.glow_model_dir)
-        checkpoint_path = utils.latest_checkpoint_path(self.glow_model_dir)
-        symbols = list(hps.data.punc) + list(hps.data.chars)
-        glow_tts_model = models.FlowGenerator(
-            len(symbols) + getattr(hps.data, "add_blank", False),
-            out_channels=hps.data.n_mel_channels,
-            **hps.model
-        )  # .to(self.device)
-
-        if self.device == "cuda":
-            glow_tts_model.to("cuda")
-
-        utils.load_checkpoint(checkpoint_path, glow_tts_model)
-        glow_tts_model.decoder.store_inverse()
-        _ = glow_tts_model.eval()
-
-        return hps, glow_tts_model
-
-    def generate_mel(self, text, noise_scale=0.667, length_scale=1.0):
-        print(f"Noise scale: {noise_scale} and Length scale: {length_scale}")
-        symbols = list(self.hps.data.punc) + list(self.hps.data.chars)
-        cleaner = self.hps.data.text_cleaners
-        if getattr(self.hps.data, "add_blank", False):
-            text_norm = text_to_sequence(text, symbols, cleaner)
-            text_norm = commons.intersperse(text_norm, len(symbols))
-        else:  # If not using the "add_blank" option during training, adding spaces at the beginning and the end of the utterance improves quality
-            text = " " + text.strip() + " "
-            text_norm = text_to_sequence(text, symbols, cleaner)
-
-        sequence = np.array(text_norm)[None, :]
-
-        del symbols
-        del cleaner
-        del text
-        del text_norm
-
-        if self.device == "cuda":
-            x_tst = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long()
-            x_tst_lengths = torch.tensor([x_tst.shape[1]]).cuda()
-        else:
x_tst = torch.autograd.Variable(torch.from_numpy(sequence)).long() - x_tst_lengths = torch.tensor([x_tst.shape[1]]) - - with torch.no_grad(): - (y_gen_tst, *_), *_, (attn_gen, *_) = self.glow_tts_model( - x_tst, - x_tst_lengths, - gen=True, - noise_scale=noise_scale, - length_scale=length_scale, - ) - del x_tst - del x_tst_lengths - torch.cuda.empty_cache() - return y_gen_tst.cpu().detach().numpy() - - -class MelToWav: - def __init__(self, hifi_model_dir, device="cuda"): - self.hifi_model_dir = hifi_model_dir - check_directory(self.hifi_model_dir) - self.device = device - self.h, self.hifi_gan_generator = self.load_hifi_gan() - - def load_hifi_gan(self): - checkpoint_path = utils.latest_checkpoint_path(self.hifi_model_dir, regex="g_*") - config_file = os.path.join(self.hifi_model_dir, "config.json") - data = open(config_file).read() - json_config = json.loads(data) - h = AttrDict(json_config) - torch.manual_seed(h.seed) - - generator = Generator(h).to(self.device) - - assert os.path.isfile(checkpoint_path) - print("Loading '{}'".format(checkpoint_path)) - state_dict_g = torch.load(checkpoint_path, map_location=self.device) - print("Complete.") - - generator.load_state_dict(state_dict_g["generator"]) - - generator.eval() - generator.remove_weight_norm() - - return h, generator - - def generate_wav(self, mel): - mel = torch.FloatTensor(mel).to(self.device) - - y_g_hat = self.hifi_gan_generator(mel) # passing through vocoder - audio = y_g_hat.squeeze() - audio = audio * 32768.0 - audio = audio.cpu().detach().numpy().astype("int16") - - del y_g_hat - del mel - torch.cuda.empty_cache() - return audio, self.h.sampling_rate - -def restricted_float(x): - try: - x = float(x) - except ValueError: - raise argparse.ArgumentTypeError("%r not a floating-point literal" % (x,)) - - if x < 0.0 or x > 1.0: - raise argparse.ArgumentTypeError("%r not in range [0.0, 1.0]"%(x,)) - return x - - -if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument("-a", "--acoustic", required=True, type=str) - parser.add_argument("-v", "--vocoder", required=True, type=str) - parser.add_argument("-d", "--device", type=str, default="cpu") - parser.add_argument("-t", "--text", type=str, required=True) - parser.add_argument("-w", "--wav", type=str, required=True) - parser.add_argument("-n", "--noise-scale", default=0.667, type=restricted_float ) - parser.add_argument("-l", "--length-scale", default=1.0, type=float) - - args = parser.parse_args() - - text_to_mel = TextToMel(glow_model_dir=args.acoustic, device=args.device) - mel_to_wav = MelToWav(hifi_model_dir=args.vocoder, device=args.device) - - mel = text_to_mel.generate_mel(args.text, args.noise_scale, args.length_scale) - audio, sr = mel_to_wav.generate_wav(mel) - - write(filename=args.wav, rate=sr, data=audio) - diff --git a/spaces/HighCWu/GPEN/face_model/face_gan.py b/spaces/HighCWu/GPEN/face_model/face_gan.py deleted file mode 100644 index 1e2d56a461dc4293f73cbca1d7243fa0c656bf99..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/GPEN/face_model/face_gan.py +++ /dev/null @@ -1,62 +0,0 @@ -''' -@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021) -@author: yangxy (yangtao9009@gmail.com) -''' -import torch -import os -import cv2 -import glob -import numpy as np -from torch import nn -import torch.nn.functional as F -from torchvision import transforms, utils -from model import FullGenerator, FullGenerator_SR - -class FaceGAN(object): - def __init__(self, base_dir='./', size=512, out_size=None, model=None, 
channel_multiplier=2, narrow=1, key=None, is_norm=True, device='cuda'): - self.mfile = os.path.join(base_dir, 'weights', model+'.pth') - self.n_mlp = 8 - self.device = device - self.is_norm = is_norm - self.in_resolution = size - self.out_resolution = size if out_size == None else out_size - self.key = key - self.load_model(channel_multiplier, narrow) - - def load_model(self, channel_multiplier=2, narrow=1): - if self.in_resolution == self.out_resolution: - self.model = FullGenerator(self.in_resolution, 512, self.n_mlp, channel_multiplier, narrow=narrow, device=self.device) - else: - self.model = FullGenerator_SR(self.in_resolution, self.out_resolution, 512, self.n_mlp, channel_multiplier, narrow=narrow, device=self.device) - pretrained_dict = torch.load(self.mfile, map_location=torch.device('cpu')) - if self.key is not None: pretrained_dict = pretrained_dict[self.key] - self.model.load_state_dict(pretrained_dict) - self.model.to(self.device) - self.model.eval() - - def process(self, img): - img = cv2.resize(img, (self.in_resolution, self.in_resolution)) - img_t = self.img2tensor(img) - - with torch.no_grad(): - out, __ = self.model(img_t) - del img_t - - out = self.tensor2img(out) - - return out - - def img2tensor(self, img): - img_t = torch.from_numpy(img).to(self.device)/255. - if self.is_norm: - img_t = (img_t - 0.5) / 0.5 - img_t = img_t.permute(2, 0, 1).unsqueeze(0).flip(1) # BGR->RGB - return img_t - - def tensor2img(self, img_t, pmax=255.0, imtype=np.uint8): - if self.is_norm: - img_t = img_t * 0.5 + 0.5 - img_t = img_t.squeeze(0).permute(1, 2, 0).flip(2) # RGB->BGR - img_np = np.clip(img_t.float().cpu().numpy(), 0, 1) * pmax - - return img_np.astype(imtype) \ No newline at end of file diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/external_utils.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/external_utils.py deleted file mode 100644 index e00b2f4fdd3c5fb09b06177fa1e40148b71aefd4..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/external_utils.py +++ /dev/null @@ -1,186 +0,0 @@ -"""Utility function for gradio/external.py""" - -import base64 -import json -import math -import operator -import re -import warnings -from typing import Any, Dict, List, Tuple - -import requests -import websockets -import yaml -from packaging import version -from websockets.legacy.protocol import WebSocketCommonProtocol - -from gradio import components, exceptions - -################## -# Helper functions for processing tabular data -################## - - -def get_tabular_examples(model_name: str) -> Dict[str, List[float]]: - readme = requests.get(f"https://huggingface.co/{model_name}/resolve/main/README.md") - if readme.status_code != 200: - warnings.warn(f"Cannot load examples from README for {model_name}", UserWarning) - example_data = {} - else: - yaml_regex = re.search( - "(?:^|[\r\n])---[\n\r]+([\\S\\s]*?)[\n\r]+---([\n\r]|$)", readme.text - ) - if yaml_regex is None: - example_data = {} - else: - example_yaml = next( - yaml.safe_load_all(readme.text[: yaml_regex.span()[-1]]) - ) - example_data = example_yaml.get("widget", {}).get("structuredData", {}) - if not example_data: - raise ValueError( - f"No example data found in README.md of {model_name} - Cannot build gradio demo. " - "See the README.md here: https://huggingface.co/scikit-learn/tabular-playground/blob/main/README.md " - "for a reference on how to provide example data to your model." 
-            )
-    # replace nan with string NaN for inference API
-    for data in example_data.values():
-        for i, val in enumerate(data):
-            if isinstance(val, float) and math.isnan(val):
-                data[i] = "NaN"
-    return example_data
-
-
-def cols_to_rows(
-    example_data: Dict[str, List[float]]
-) -> Tuple[List[str], List[List[float]]]:
-    headers = list(example_data.keys())
-    n_rows = max(len(example_data[header] or []) for header in headers)
-    data = []
-    for row_index in range(n_rows):
-        row_data = []
-        for header in headers:
-            col = example_data[header] or []
-            if row_index >= len(col):
-                row_data.append("NaN")
-            else:
-                row_data.append(col[row_index])
-        data.append(row_data)
-    return headers, data
-
-
-def rows_to_cols(incoming_data: Dict) -> Dict[str, Dict[str, Dict[str, List[str]]]]:
-    data_column_wise = {}
-    for i, header in enumerate(incoming_data["headers"]):
-        data_column_wise[header] = [str(row[i]) for row in incoming_data["data"]]
-    return {"inputs": {"data": data_column_wise}}
-
-
-##################
-# Helper functions for processing other kinds of data
-##################
-
-
-def postprocess_label(scores: Dict) -> Dict:
-    sorted_pred = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
-    return {
-        "label": sorted_pred[0][0],
-        "confidences": [
-            {"label": pred[0], "confidence": pred[1]} for pred in sorted_pred
-        ],
-    }
-
-
-def encode_to_base64(r: requests.Response) -> str:
-    # Handles the different ways HF API returns the prediction
-    base64_repr = base64.b64encode(r.content).decode("utf-8")
-    data_prefix = ";base64,"
-    # Case 1: base64 representation already includes data prefix
-    if data_prefix in base64_repr:
-        return base64_repr
-    else:
-        content_type = r.headers.get("content-type")
-        # Case 2: the data prefix is a key in the response
-        if content_type == "application/json":
-            try:
-                content_type = r.json()[0]["content-type"]
-                base64_repr = r.json()[0]["blob"]
-            except KeyError:
-                raise ValueError(
-                    "Cannot determine content type returned by external API."
-                )
-        # Case 3: the data prefix is included in the response headers
-        else:
-            pass
-        new_base64 = "data:{};base64,".format(content_type) + base64_repr
-        return new_base64
-
-
-##################
-# Helper functions for connecting to websockets
-##################
-
-
-async def get_pred_from_ws(
-    websocket: WebSocketCommonProtocol, data: str, hash_data: str
-) -> Dict[str, Any]:
-    completed = False
-    resp = {}
-    while not completed:
-        msg = await websocket.recv()
-        resp = json.loads(msg)
-        if resp["msg"] == "queue_full":
-            raise exceptions.Error("Queue is full! 
Please try again.") - if resp["msg"] == "send_hash": - await websocket.send(hash_data) - elif resp["msg"] == "send_data": - await websocket.send(data) - completed = resp["msg"] == "process_completed" - return resp["output"] - - -def get_ws_fn(ws_url, headers): - async def ws_fn(data, hash_data): - async with websockets.connect( # type: ignore - ws_url, open_timeout=10, extra_headers=headers - ) as websocket: - return await get_pred_from_ws(websocket, data, hash_data) - - return ws_fn - - -def use_websocket(config, dependency): - queue_enabled = config.get("enable_queue", False) - queue_uses_websocket = version.parse( - config.get("version", "2.0") - ) >= version.Version("3.2") - dependency_uses_queue = dependency.get("queue", False) is not False - return queue_enabled and queue_uses_websocket and dependency_uses_queue - - -################## -# Helper function for cleaning up an Interface loaded from HF Spaces -################## - - -def streamline_spaces_interface(config: Dict) -> Dict: - """Streamlines the interface config dictionary to remove unnecessary keys.""" - config["inputs"] = [ - components.get_component_instance(component) - for component in config["input_components"] - ] - config["outputs"] = [ - components.get_component_instance(component) - for component in config["output_components"] - ] - parameters = { - "article", - "description", - "flagging_options", - "inputs", - "outputs", - "theme", - "title", - } - config = {k: config[k] for k in parameters} - return config diff --git a/spaces/HugoDzz/super-godot-galaxy/static/smg/index.worker.js b/spaces/HugoDzz/super-godot-galaxy/static/smg/index.worker.js deleted file mode 100644 index 5a62c09adf44aeab8ef9c10e498b4fe264f5390b..0000000000000000000000000000000000000000 --- a/spaces/HugoDzz/super-godot-galaxy/static/smg/index.worker.js +++ /dev/null @@ -1,164 +0,0 @@ -/** - * @license - * Copyright 2015 The Emscripten Authors - * SPDX-License-Identifier: MIT - */ - -// Pthread Web Worker startup routine: -// This is the entry point file that is loaded first by each Web Worker -// that executes pthreads on the Emscripten application. - -'use strict'; - -var Module = {}; - -// Thread-local guard variable for one-time init of the JS state -var initializedJS = false; - -// Proxying queues that were notified before the thread started and need to be -// executed as part of startup. -var pendingNotifiedProxyingQueues = []; - -function assert(condition, text) { - if (!condition) abort('Assertion failed: ' + text); -} - -function threadPrintErr() { - var text = Array.prototype.slice.call(arguments).join(' '); - console.error(text); -} -function threadAlert() { - var text = Array.prototype.slice.call(arguments).join(' '); - postMessage({cmd: 'alert', text: text, threadId: Module['_pthread_self']()}); -} -// We don't need out() for now, but may need to add it if we want to use it -// here. Or, if this code all moves into the main JS, that problem will go -// away. (For now, adding it here increases code size for no benefit.) -var out = () => { throw 'out() is not defined in worker.js.'; } -var err = threadPrintErr; -self.alert = threadAlert; - -Module['instantiateWasm'] = (info, receiveInstance) => { - // Instantiate from the module posted from the main thread. - // We can just use sync instantiation in the worker. 
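-  // (The compiled module and shared wasm memory were posted from the main
-  // thread in the 'load' message, so instantiation here only creates this
-  // worker's instance against that shared memory.)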
- var instance = new WebAssembly.Instance(Module['wasmModule'], info); - // TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193, - // the above line no longer optimizes out down to the following line. - // When the regression is fixed, we can remove this if/else. - receiveInstance(instance); - // We don't need the module anymore; new threads will be spawned from the main thread. - Module['wasmModule'] = null; - return instance.exports; -} - -self.onmessage = (e) => { - try { - if (e.data.cmd === 'load') { // Preload command that is called once per worker to parse and load the Emscripten code. - - // Module and memory were sent from main thread - Module['wasmModule'] = e.data.wasmModule; - - Module['wasmMemory'] = e.data.wasmMemory; - - Module['buffer'] = Module['wasmMemory'].buffer; - - Module['ENVIRONMENT_IS_PTHREAD'] = true; - - if (typeof e.data.urlOrBlob == 'string') { - importScripts(e.data.urlOrBlob); - } else { - var objectUrl = URL.createObjectURL(e.data.urlOrBlob); - importScripts(objectUrl); - URL.revokeObjectURL(objectUrl); - } - Godot(Module).then(function (instance) { - Module = instance; - }); - } else if (e.data.cmd === 'run') { - // This worker was idle, and now should start executing its pthread entry - // point. - // performance.now() is specced to return a wallclock time in msecs since - // that Web Worker/main thread launched. However for pthreads this can - // cause subtle problems in emscripten_get_now() as this essentially - // would measure time from pthread_create(), meaning that the clocks - // between each threads would be wildly out of sync. Therefore sync all - // pthreads to the clock on the main browser thread, so that different - // threads see a somewhat coherent clock across each of them - // (+/- 0.1msecs in testing). - Module['__performance_now_clock_drift'] = performance.now() - e.data.time; - - // Pass the thread address to wasm to store it for fast access. - Module['__emscripten_thread_init'](e.data.pthread_ptr, /*isMainBrowserThread=*/0, /*isMainRuntimeThread=*/0, /*canBlock=*/1); - - assert(e.data.pthread_ptr); - // Also call inside JS module to set up the stack frame for this pthread in JS module scope - Module['establishStackSpace'](); - Module['PThread'].receiveObjectTransfer(e.data); - Module['PThread'].threadInitTLS(); - - if (!initializedJS) { - - // Execute any proxied work that came in before the thread was - // initialized. Only do this once because it is only possible for - // proxying notifications to arrive before thread initialization on - // fresh workers. - pendingNotifiedProxyingQueues.forEach(queue => { - Module['executeNotifiedProxyingQueue'](queue); - }); - pendingNotifiedProxyingQueues = []; - initializedJS = true; - } - - try { - Module['invokeEntryPoint'](e.data.start_routine, e.data.arg); - } catch(ex) { - if (ex != 'unwind') { - // ExitStatus not present in MINIMAL_RUNTIME - if (ex instanceof Module['ExitStatus']) { - if (Module['keepRuntimeAlive']()) { - err('Pthread 0x' + Module['_pthread_self']().toString(16) + ' called exit(), staying alive due to noExitRuntime.'); - } else { - err('Pthread 0x' + Module['_pthread_self']().toString(16) + ' called exit(), calling _emscripten_thread_exit.'); - Module['__emscripten_thread_exit'](ex.status); - } - } - else - { - // The pthread "crashed". Do not call `_emscripten_thread_exit` (which - // would make this thread joinable. Instead, re-throw the exception - // and let the top level handler propagate it back to the main thread. 
- throw ex; - } - } else { - // else e == 'unwind', and we should fall through here and keep the pthread alive for asynchronous events. - err('Pthread 0x' + Module['_pthread_self']().toString(16) + ' completed its main entry point with an `unwind`, keeping the worker alive for asynchronous operation.'); - } - } - } else if (e.data.cmd === 'cancel') { // Main thread is asking for a pthread_cancel() on this thread. - if (Module['_pthread_self']()) { - Module['__emscripten_thread_exit'](-1/*PTHREAD_CANCELED*/); - } - } else if (e.data.target === 'setimmediate') { - // no-op - } else if (e.data.cmd === 'processProxyingQueue') { - if (initializedJS) { - Module['executeNotifiedProxyingQueue'](e.data.queue); - } else { - // Defer executing this queue until the runtime is initialized. - pendingNotifiedProxyingQueues.push(e.data.queue); - } - } else { - err('worker.js received unknown command ' + e.data.cmd); - err(e.data); - } - } catch(ex) { - err('worker.js onmessage() captured an uncaught exception: ' + ex); - if (ex && ex.stack) err(ex.stack); - if (Module['__emscripten_thread_crashed']) { - Module['__emscripten_thread_crashed'](); - } - throw ex; - } -}; - - diff --git a/spaces/Hugorowan/image-to-video-film-2-og-by-kazuk/README.md b/spaces/Hugorowan/image-to-video-film-2-og-by-kazuk/README.md deleted file mode 100644 index 7e143a345fce489a0c9b7b5d9939d3f2867500af..0000000000000000000000000000000000000000 --- a/spaces/Hugorowan/image-to-video-film-2-og-by-kazuk/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Images to Video -emoji: 👁 -colorFrom: pink -colorTo: green -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -license: unknown -duplicated_from: kazuk/image-to-video-film ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/generate_waveform.py b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/generate_waveform.py deleted file mode 100644 index bfc2ef8eb3d91366caf7609d75aa1795ab0ed8f9..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/generate_waveform.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -import logging -import matplotlib.pyplot as plt -import numpy as np -from pathlib import Path -import soundfile as sf -import sys -import torch -import torchaudio - -from fairseq import checkpoint_utils, options, tasks, utils -from fairseq.logging import progress_bar -from fairseq.tasks.text_to_speech import plot_tts_output -from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset - - -logging.basicConfig() -logging.root.setLevel(logging.INFO) -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -def make_parser(): - parser = options.get_speech_generation_parser() - parser.add_argument("--dump-features", action="store_true") - parser.add_argument("--dump-waveforms", action="store_true") - parser.add_argument("--dump-attentions", action="store_true") - parser.add_argument("--dump-eos-probs", action="store_true") - parser.add_argument("--dump-plots", action="store_true") - parser.add_argument("--dump-target", action="store_true") - parser.add_argument("--output-sample-rate", default=22050, type=int) - parser.add_argument("--teacher-forcing", action="store_true") - parser.add_argument( - "--audio-format", type=str, default="wav", choices=["wav", "flac"] - ) - return parser - - -def postprocess_results( - dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target -): - def to_np(x): - return None if x is None else x.detach().cpu().numpy() - - sample_ids = [dataset.ids[i] for i in sample["id"].tolist()] - texts = sample["src_texts"] - attns = [to_np(hypo["attn"]) for hypo in hypos] - eos_probs = [to_np(hypo.get("eos_prob", None)) for hypo in hypos] - feat_preds = [to_np(hypo["feature"]) for hypo in hypos] - wave_preds = [to_np(resample_fn(h["waveform"])) for h in hypos] - if dump_target: - feat_targs = [to_np(hypo["targ_feature"]) for hypo in hypos] - wave_targs = [to_np(resample_fn(h["targ_waveform"])) for h in hypos] - else: - feat_targs = [None for _ in hypos] - wave_targs = [None for _ in hypos] - - return zip(sample_ids, texts, attns, eos_probs, feat_preds, wave_preds, - feat_targs, wave_targs) - - -def dump_result( - is_na_model, - args, - vocoder, - sample_id, - text, - attn, - eos_prob, - feat_pred, - wave_pred, - feat_targ, - wave_targ, -): - sample_rate = args.output_sample_rate - out_root = Path(args.results_path) - if args.dump_features: - feat_dir = out_root / "feat" - feat_dir.mkdir(exist_ok=True, parents=True) - np.save(feat_dir / f"{sample_id}.npy", feat_pred) - if args.dump_target: - feat_tgt_dir = out_root / "feat_tgt" - feat_tgt_dir.mkdir(exist_ok=True, parents=True) - np.save(feat_tgt_dir / f"{sample_id}.npy", feat_targ) - if args.dump_attentions: - attn_dir = out_root / "attn" - attn_dir.mkdir(exist_ok=True, parents=True) - np.save(attn_dir / f"{sample_id}.npy", attn.numpy()) - if args.dump_eos_probs and not is_na_model: - eos_dir = out_root / "eos" - eos_dir.mkdir(exist_ok=True, parents=True) - np.save(eos_dir / f"{sample_id}.npy", eos_prob) - - if args.dump_plots: - images = [feat_pred.T] if is_na_model else [feat_pred.T, attn] - names = ["output"] if is_na_model else ["output", "alignment"] - if feat_targ is not None: - images = [feat_targ.T] + images - names = [f"target (idx={sample_id})"] + names - if is_na_model: - plot_tts_output(images, names, attn, "alignment", suptitle=text) - else: - plot_tts_output(images, names, eos_prob, "eos prob", suptitle=text) - plot_dir = out_root / "plot" - plot_dir.mkdir(exist_ok=True, parents=True) - plt.savefig(plot_dir / f"{sample_id}.png") - plt.close() - - if 
args.dump_waveforms: - ext = args.audio_format - if wave_pred is not None: - wav_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}" - wav_dir.mkdir(exist_ok=True, parents=True) - sf.write(wav_dir / f"{sample_id}.{ext}", wave_pred, sample_rate) - if args.dump_target and wave_targ is not None: - wav_tgt_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}_tgt" - wav_tgt_dir.mkdir(exist_ok=True, parents=True) - sf.write(wav_tgt_dir / f"{sample_id}.{ext}", wave_targ, sample_rate) - - -def main(args): - assert(args.dump_features or args.dump_waveforms or args.dump_attentions - or args.dump_eos_probs or args.dump_plots) - if args.max_tokens is None and args.batch_size is None: - args.max_tokens = 8000 - logger.info(args) - - use_cuda = torch.cuda.is_available() and not args.cpu - task = tasks.setup_task(args) - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [args.path], - task=task, - ) - model = models[0].cuda() if use_cuda else models[0] - # use the original n_frames_per_step - task.args.n_frames_per_step = saved_cfg.task.n_frames_per_step - task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task) - - data_cfg = task.data_cfg - sample_rate = data_cfg.config.get("features", {}).get("sample_rate", 22050) - resample_fn = { - False: lambda x: x, - True: lambda x: torchaudio.sox_effects.apply_effects_tensor( - x.detach().cpu().unsqueeze(0), sample_rate, - [['rate', str(args.output_sample_rate)]] - )[0].squeeze(0) - }.get(args.output_sample_rate != sample_rate) - if args.output_sample_rate != sample_rate: - logger.info(f"resampling to {args.output_sample_rate}Hz") - - generator = task.build_generator([model], args) - itr = task.get_batch_iterator( - dataset=task.dataset(args.gen_subset), - max_tokens=args.max_tokens, - max_sentences=args.batch_size, - max_positions=(sys.maxsize, sys.maxsize), - ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, - required_batch_size_multiple=args.required_batch_size_multiple, - num_shards=args.num_shards, - shard_id=args.shard_id, - num_workers=args.num_workers, - data_buffer_size=args.data_buffer_size, - ).next_epoch_itr(shuffle=False) - - Path(args.results_path).mkdir(exist_ok=True, parents=True) - is_na_model = getattr(model, "NON_AUTOREGRESSIVE", False) - dataset = task.dataset(args.gen_subset) - vocoder = task.args.vocoder - with progress_bar.build_progress_bar(args, itr) as t: - for sample in t: - sample = utils.move_to_cuda(sample) if use_cuda else sample - hypos = generator.generate(model, sample, has_targ=args.dump_target) - for result in postprocess_results( - dataset, sample, hypos, resample_fn, args.dump_target - ): - dump_result(is_na_model, args, vocoder, *result) - - -def cli_main(): - parser = make_parser() - args = options.parse_args_and_arch(parser) - main(args) - - -if __name__ == "__main__": - cli_main() diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/optim/adagrad.py b/spaces/ICML2022/OFA/fairseq/fairseq/optim/adagrad.py deleted file mode 100644 index 4f539541c1c91d8c822f7ce624fa6eabf744f60e..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/optim/adagrad.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch.optim - -from . 
import LegacyFairseqOptimizer, register_optimizer
-
-
-@register_optimizer("adagrad")
-class Adagrad(LegacyFairseqOptimizer):
-    def __init__(self, args, params):
-        super().__init__(args)
-        self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)
-
-    @staticmethod
-    def add_args(parser):
-        """Add optimizer-specific arguments to the parser."""
-        # fmt: off
-        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
-                            help='weight decay')
-        # fmt: on
-
-    @property
-    def optimizer_config(self):
-        """
-        Return a kwarg dictionary that will be used to override optimizer
-        args stored in checkpoints. This allows us to load a checkpoint and
-        resume training using a different set of optimizer args, e.g., with a
-        different learning rate.
-        """
-        return {
-            "lr": self.args.lr[0],
-            "weight_decay": self.args.weight_decay,
-        }
-
-    @property
-    def supports_flat_params(self):
-        return False
diff --git a/spaces/Illumotion/Koboldcpp/examples/parallel/parallel.cpp b/spaces/Illumotion/Koboldcpp/examples/parallel/parallel.cpp
deleted file mode 100644
index 721888da7de9491e6b85c15f26cbe6e8f6c8475e..0000000000000000000000000000000000000000
--- a/spaces/Illumotion/Koboldcpp/examples/parallel/parallel.cpp
+++ /dev/null
@@ -1,426 +0,0 @@
-// A basic application simulating a server with multiple clients.
-// The clients submit requests to the server and they are processed in parallel.
-
-#include "build-info.h"
-
-#include "common.h"
-#include "llama.h"
-
-#include <cmath>
-#include <cstdio>
-#include <string>
-#include <vector>
-#include <ctime>
-
-// trim whitespace from the beginning and end of a string
-static std::string trim(const std::string & str) {
-    size_t start = 0;
-    size_t end = str.size();
-
-    while (start < end && isspace(str[start])) {
-        start += 1;
-    }
-
-    while (end > start && isspace(str[end - 1])) {
-        end -= 1;
-    }
-
-    return str.substr(start, end - start);
-}
-
-static std::string k_system =
-R"(Transcript of a never ending dialog, where the User interacts with an Assistant.
-The Assistant is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.
-
-User: Recommend a nice restaurant in the area.
-Assistant: I recommend the restaurant "The Golden Duck". It is a 5 star restaurant with a great view of the city. The food is delicious and the service is excellent. The prices are reasonable and the portions are generous. The restaurant is located at 123 Main Street, New York, NY 10001. The phone number is (212) 555-1234. The hours are Monday through Friday from 11:00 am to 10:00 pm. The restaurant is closed on Saturdays and Sundays.
-User: Who is Richard Feynman?
-Assistant: Richard Feynman was an American physicist who is best known for his work in quantum mechanics and particle physics. He was awarded the Nobel Prize in Physics in 1965 for his contributions to the development of quantum electrodynamics. He was a popular lecturer and author, and he wrote several books, including "Surely You're Joking, Mr. Feynman!" and "What Do You Care What Other People Think?".
-User:)"; - -static std::vector k_prompts = { - "What is the meaning of life?", - "Tell me an interesting fact about llamas.", - "What is the best way to cook a steak?", - "Are you familiar with the Special Theory of Relativity and can you explain it to me?", - "Recommend some interesting books to read.", - "What is the best way to learn a new language?", - "How to get a job at Google?", - "If you could have any superpower, what would it be?", - "I want to learn how to play the piano.", -}; - -struct client { - int32_t id = 0; - - llama_seq_id seq_id = -1; - - llama_token sampled; - - int64_t t_start_prompt; - int64_t t_start_gen; - - int32_t n_prompt = 0; - int32_t n_decoded = 0; - int32_t i_batch = -1; - - std::string input; - std::string prompt; - std::string response; - - std::vector tokens_prev; -}; - -static void print_date_time() { - std::time_t current_time = std::time(nullptr); - std::tm* local_time = std::localtime(¤t_time); - char buffer[80]; - strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", local_time); - - printf("\n\033[35mrun parameters as at %s\033[0m\n", buffer); -} - -// Define a split string function to ... -static std::vector split_string(const std::string& input, char delimiter) { - std::vector tokens; - std::istringstream stream(input); - std::string token; - while (std::getline(stream, token, delimiter)) { - tokens.push_back(token); - } - return tokens; -} - -int main(int argc, char ** argv) { - srand(1234); - - gpt_params params; - - if (gpt_params_parse(argc, argv, params) == false) { - return 1; - } - - // number of simultaneous "clients" to simulate - const int32_t n_clients = params.n_parallel; - - // requests to simulate - const int32_t n_seq = params.n_sequences; - - // insert new requests as soon as the previous one is done - const bool cont_batching = params.cont_batching; - -#ifndef LOG_DISABLE_LOGS - log_set_target(log_filename_generator("parallel", "log")); - LOG_TEE("Log start\n"); - log_dump_cmdline(argc, argv); -#endif // LOG_DISABLE_LOGS - - // init llama.cpp - llama_backend_init(params.numa); - - llama_model * model = NULL; - llama_context * ctx = NULL; - - // load the target model - params.logits_all = true; - std::tie(model, ctx) = llama_init_from_gpt_params(params); - - // load the prompts from an external file if there are any - if (params.prompt.empty()) { - printf("\n\033[32mNo new questions so proceed with build-in defaults.\033[0m\n"); - } else { - // Output each line of the input params.prompts vector and copy to k_prompts - int index = 0; - printf("\n\033[32mNow printing the external prompt file %s\033[0m\n\n", params.prompt_file.c_str()); - - std::vector prompts = split_string(params.prompt, '\n'); - for (const auto& prompt : prompts) { - k_prompts.resize(index + 1); - k_prompts[index] = prompt; - index++; - printf("%3d prompt: %s\n", index, prompt.c_str()); - } - } - - fprintf(stderr, "\n\n"); - fflush(stderr); - - const int n_ctx = llama_n_ctx(ctx); - const int n_vocab = llama_n_vocab(model); - - std::vector clients(n_clients); - for (size_t i = 0; i < clients.size(); ++i) { - auto & client = clients[i]; - client.id = i; - client.tokens_prev.resize(std::max(256, params.n_predict)); - std::fill(client.tokens_prev.begin(), client.tokens_prev.end(), 0); - } - - std::vector candidates; - candidates.reserve(n_vocab); - - std::vector tokens_system; - tokens_system = ::llama_tokenize(ctx, k_system, true); - const int32_t n_tokens_system = tokens_system.size(); - - llama_seq_id g_seq_id = 0; - - // the max batch size is as large as the 
-    // users. regardless of the size, the main loop will chunk the batch into a maximum of params.n_batch tokens at a time
-    llama_batch batch = llama_batch_init(params.n_ctx, 0);
-
-    int32_t n_total_prompt = 0;
-    int32_t n_total_gen    = 0;
-    int32_t n_cache_miss   = 0;
-
-    const auto t_main_start = ggml_time_us();
-
-    LOG_TEE("%s: Simulating parallel requests from clients:\n", __func__);
-    LOG_TEE("%s: n_parallel = %d, n_sequences = %d, cont_batching = %d, system tokens = %d\n", __func__, n_clients, n_seq, cont_batching, n_tokens_system);
-    LOG_TEE("\n");
-
-    {
-        LOG_TEE("%s: Evaluating the system prompt ...\n", __func__);
-
-        batch.n_tokens = n_tokens_system;
-
-        for (int32_t i = 0; i < batch.n_tokens; ++i) {
-            batch.token[i]  = tokens_system[i];
-            batch.pos[i]    = i;
-            batch.seq_id[i] = 0;
-            batch.logits[i] = false;
-        }
-
-        if (llama_decode(ctx, batch) != 0) {
-            LOG_TEE("%s: llama_decode() failed\n", __func__);
-            return 1;
-        }
-
-        // assign the system KV cache to all parallel sequences
-        for (int32_t i = 1; i < n_clients; ++i) {
-            llama_kv_cache_seq_cp(ctx, 0, i, 0, n_tokens_system);
-        }
-
-        LOG_TEE("\n");
-    }
-
-    LOG_TEE("Processing requests ...\n\n");
-
-    while (true) {
-        batch.n_tokens = 0;
-
-        // decode any currently ongoing sequences
-        for (auto & client : clients) {
-            if (client.seq_id == -1) {
-                continue;
-            }
-
-            batch.token [batch.n_tokens] = client.sampled;
-            batch.pos   [batch.n_tokens] = n_tokens_system + client.n_prompt + client.n_decoded;
-            batch.seq_id[batch.n_tokens] = client.id;
-            batch.logits[batch.n_tokens] = true;
-
-            client.n_decoded += 1;
-            client.i_batch = batch.n_tokens;
-
-            batch.n_tokens += 1;
-        }
-
-        if (batch.n_tokens == 0) {
-            // all sequences have ended - clear the entire KV cache
-            for (int i = 0; i < n_clients; ++i) {
-                llama_kv_cache_seq_rm(ctx, i, n_tokens_system, -1);
-            }
-
-            LOG_TEE("%s: clearing the KV cache\n", __func__);
-        }
-
-        // insert new sequences for decoding
-        if (cont_batching || batch.n_tokens == 0) {
-            for (auto & client : clients) {
-                if (client.seq_id == -1 && g_seq_id < n_seq) {
-                    client.seq_id = g_seq_id;
-
-                    client.t_start_prompt = ggml_time_us();
-                    client.t_start_gen    = 0;
-
-                    client.input    = k_prompts[rand() % k_prompts.size()];
-                    client.prompt   = client.input + "\nAssistant:";
-                    client.response = "";
-
-                    std::fill(client.tokens_prev.begin(), client.tokens_prev.end(), 0);
-
-                    // do not prepend BOS because we have a system prompt!
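-                    // The shared system prompt already occupies positions
-                    // [0, n_tokens_system) of every sequence's KV cache, so the
-                    // per-client prompt is tokenized without BOS and its
-                    // positions start at n_tokens_system.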
-                    std::vector<llama_token> tokens_prompt;
-                    tokens_prompt = ::llama_tokenize(ctx, client.prompt, false);
-
-                    for (size_t i = 0; i < tokens_prompt.size(); ++i) {
-                        batch.token [batch.n_tokens] = tokens_prompt[i];
-                        batch.pos   [batch.n_tokens] = i + n_tokens_system;
-                        batch.seq_id[batch.n_tokens] = client.id;
-                        batch.logits[batch.n_tokens] = false;
-                        batch.n_tokens += 1;
-                    }
-
-                    // extract the logits only for the last token
-                    if (batch.n_tokens > 0) {
-                        batch.logits[batch.n_tokens - 1] = true;
-                    }
-
-                    client.n_prompt  = tokens_prompt.size();
-                    client.n_decoded = 0;
-                    client.i_batch   = batch.n_tokens - 1;
-
-                    LOG_TEE("\033[31mClient %3d, seq %4d, started decoding ...\033[0m\n", client.id, client.seq_id);
-
-                    g_seq_id += 1;
-
-                    // insert new requests one-by-one
-                    //if (cont_batching) {
-                    //    break;
-                    //}
-                }
-            }
-        }
-
-        if (batch.n_tokens == 0) {
-            break;
-        }
-
-        // process in chunks of params.n_batch
-        int32_t n_batch = params.n_batch;
-
-        for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
-            // experiment: process in powers of 2
-            //if (i + n_batch > (int32_t) batch.n_tokens && n_batch > 32) {
-            //    n_batch /= 2;
-            //    i -= n_batch;
-            //    continue;
-            //}
-
-            const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
-
-            llama_batch batch_view = {
-                n_tokens,
-                batch.token  + i,
-                nullptr,
-                batch.pos    + i,
-                batch.seq_id + i,
-                batch.logits + i,
-                0, 0, 0, // unused
-            };
-
-            const int ret = llama_decode(ctx, batch_view);
-            if (ret != 0) {
-                if (n_batch == 1 || ret < 0) {
-                    // if you get here, it means the KV cache is full - try increasing it via the context size
-                    LOG_TEE("%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret);
-                    return 1;
-                }
-
-                LOG("%s : failed to decode the batch, retrying with n_batch = %d\n", __func__, n_batch / 2);
-
-                n_cache_miss += 1;
-
-                // retry with half the batch size to try to find a free slot in the KV cache
-                n_batch /= 2;
-                i -= n_batch;
-
-                continue;
-            }
-
-            LOG("%s : decoded batch of %d tokens\n", __func__, n_tokens);
-
-            for (auto & client : clients) {
-                if (client.i_batch < (int) i || client.i_batch >= (int) (i + n_tokens)) {
-                    continue;
-                }
-
-                //printf("client %d, seq %d, token %d, pos %d, batch %d\n",
-                //        client.id, client.seq_id, client.sampled, client.n_decoded, client.i_batch);
-
-                const llama_token id = llama_sample_token(ctx, NULL, NULL, params, client.tokens_prev, candidates, client.i_batch - i);
-
-                if (client.n_decoded == 1) {
-                    // start measuring generation time after the first token to make sure all concurrent clients
-                    // have their prompt already processed
-                    client.t_start_gen = ggml_time_us();
-                }
-
-                // remember which tokens were sampled - used for repetition penalties during sampling
-                client.tokens_prev.erase(client.tokens_prev.begin());
-                client.tokens_prev.push_back(id);
-
-                const std::string token_str = llama_token_to_piece(ctx, id);
-                client.response += token_str;
-                client.sampled = id;
-
-                //printf("client %d, seq %d, token %d, pos %d, batch %d: %s\n",
-                //        client.id, client.seq_id, id, client.n_decoded, client.i_batch, token_str.c_str());
-
-                if (client.n_decoded > 2 &&
-                        (id == llama_token_eos(ctx) ||
-                         (params.n_predict > 0 && client.n_decoded + client.n_prompt >= params.n_predict) ||
-                         client.response.find("User:") != std::string::npos ||
-                         client.response.find('\n') != std::string::npos)) {
-                    // basic reverse prompt
-                    const size_t pos = client.response.find("User:");
-                    if (pos != std::string::npos) {
-                        client.response = client.response.substr(0, pos);
-                    }
-
-                    // delete only the generated part of the sequence, i.e. keep the system prompt in the cache
generated part of the sequence, i.e. keep the system prompt in the cache - llama_kv_cache_seq_rm(ctx, client.id, n_tokens_system, -1); - - const auto t_main_end = ggml_time_us(); - - LOG_TEE("\033[31mClient %3d, seq %3d/%3d, prompt %4d t, response %4d t, time %5.2f s, speed %5.2f t/s, cache miss %d \033[0m \nInput: %s\n\033[35mResponse: %s\033[0m\n\n", - client.id, client.seq_id, n_seq, client.n_prompt, client.n_decoded, - (t_main_end - client.t_start_prompt) / 1e6, - (double) (client.n_prompt + client.n_decoded) / (t_main_end - client.t_start_prompt) * 1e6, - n_cache_miss, - ::trim(client.input).c_str(), - ::trim(client.response).c_str()); - - n_total_prompt += client.n_prompt; - n_total_gen += client.n_decoded; - - client.seq_id = -1; - } - - client.i_batch = -1; - } - } - } - - const auto t_main_end = ggml_time_us(); - - print_date_time(); - - LOG_TEE("\n%s: n_parallel = %d, n_sequences = %d, cont_batching = %d, system tokens = %d\n", __func__, n_clients, n_seq, cont_batching, n_tokens_system); - if (params.prompt_file.empty()) { - params.prompt_file = "used built-in defaults"; - } - LOG_TEE("External prompt file: \033[32m%s\033[0m\n", params.prompt_file.c_str()); - LOG_TEE("Model and path used: \033[32m%s\033[0m\n\n", params.model.c_str()); - - LOG_TEE("Total prompt tokens: %6d, speed: %5.2f t/s\n", n_total_prompt, (double) (n_total_prompt ) / (t_main_end - t_main_start) * 1e6); - LOG_TEE("Total gen tokens: %6d, speed: %5.2f t/s\n", n_total_gen, (double) (n_total_gen ) / (t_main_end - t_main_start) * 1e6); - LOG_TEE("Total speed (AVG): %6s speed: %5.2f t/s\n", "", (double) (n_total_prompt + n_total_gen) / (t_main_end - t_main_start) * 1e6); - LOG_TEE("Cache misses: %6d\n", n_cache_miss); - - LOG_TEE("\n"); - - llama_print_timings(ctx); - - llama_batch_free(batch); - - llama_free(ctx); - llama_free_model(model); - - llama_backend_free(); - - fprintf(stderr, "\n\n"); - - return 0; -} diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/training/losses/segmentation.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/training/losses/segmentation.py deleted file mode 100644 index 3d4a9f94eaae84722db584277dbbf9bc41ede357..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/training/losses/segmentation.py +++ /dev/null @@ -1,43 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .constants import weights as constant_weights - - -class CrossEntropy2d(nn.Module): - def __init__(self, reduction="mean", ignore_label=255, weights=None, *args, **kwargs): - """ - weight (Tensor, optional): a manual rescaling weight given to each class. 
- If given, has to be a Tensor of size "nclasses" - """ - super(CrossEntropy2d, self).__init__() - self.reduction = reduction - self.ignore_label = ignore_label - self.weights = weights - if self.weights is not None: - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - self.weights = torch.FloatTensor(constant_weights[weights]).to(device) - - def forward(self, predict, target): - """ - Args: - predict:(n, c, h, w) - target:(n, 1, h, w) - """ - target = target.long() - assert not target.requires_grad - assert predict.dim() == 4, "{0}".format(predict.size()) - assert target.dim() == 4, "{0}".format(target.size()) - assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0)) - assert target.size(1) == 1, "{0}".format(target.size(1)) - assert predict.size(2) == target.size(2), "{0} vs {1} ".format(predict.size(2), target.size(2)) - assert predict.size(3) == target.size(3), "{0} vs {1} ".format(predict.size(3), target.size(3)) - target = target.squeeze(1) - n, c, h, w = predict.size() - target_mask = (target >= 0) * (target != self.ignore_label) - target = target[target_mask] - predict = predict.transpose(1, 2).transpose(2, 3).contiguous() - predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c) - loss = F.cross_entropy(predict, target, weight=self.weights, reduction=self.reduction) - return loss diff --git a/spaces/Jaehan/Text-Generation-3/app.py b/spaces/Jaehan/Text-Generation-3/app.py deleted file mode 100644 index 78fa3913d1a86b484c6ea6e60f3b5b91b5163bc5..0000000000000000000000000000000000000000 --- a/spaces/Jaehan/Text-Generation-3/app.py +++ /dev/null @@ -1,24 +0,0 @@ -from transformers import GPT2LMHeadModel, GPT2Tokenizer -import gradio as gr - -model_name = "gpt2" -model = GPT2LMHeadModel.from_pretrained(model_name) -tokenizer = GPT2Tokenizer.from_pretrained(model_name) - -def generate(text): - token_ids = tokenizer.encode(text, return_tensors="pt") - gpt2_tensors = model.generate(token_ids, - max_length=200, - no_repeat_ngram_size=2, # n-gram size must be an int, not a bool - num_beams=3, - do_sample=True, - temperature=1.5) - - response="" - for i, x in enumerate(gpt2_tensors): - response += f"{i}: {tokenizer.decode(x, skip_special_tokens=True)}" - return response - -in_text = gr.Textbox(lines=1, label="English", placeholder="English text here") -out = gr.Textbox(lines=1, label="Generated text") -gr.Interface(generate, inputs=in_text, outputs=out).launch() \ No newline at end of file diff --git a/spaces/Jeff2323/ai-comic-factory/src/components/ui/card.tsx b/spaces/Jeff2323/ai-comic-factory/src/components/ui/card.tsx deleted file mode 100644 index 6583ebc1bb942bfb94e00fb4e7c7d685073c7b2a..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/components/ui/card.tsx +++ /dev/null @@ -1,79 +0,0 @@ -import * as React from "react" - -import { cn } from "@/lib/utils" - -const Card = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes<HTMLDivElement> ->(({ className, ...props }, ref) => ( - <div - ref={ref} - className={cn("rounded-lg border bg-card text-card-foreground shadow-sm", className)} - {...props} - />
-)) -Card.displayName = "Card" - -const CardHeader = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -
-)) -CardHeader.displayName = "CardHeader" - -const CardTitle = React.forwardRef< - HTMLParagraphElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -

-)) -CardTitle.displayName = "CardTitle" - -const CardDescription = React.forwardRef< - HTMLParagraphElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -

-)) -CardDescription.displayName = "CardDescription" - -const CardContent = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -

-)) -CardContent.displayName = "CardContent" - -const CardFooter = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -
-)) -CardFooter.displayName = "CardFooter" - -export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent } diff --git a/spaces/JeffJing/ZookChatBot/app.py b/spaces/JeffJing/ZookChatBot/app.py deleted file mode 100644 index cad21214aed65ebaaea246bc7e25f065eb77cfee..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/app.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import gradio as gr -from steamship import Steamship - -#if you have OpenAI API key as an environment variable, enable the below -#openai.api_key = os.getenv("OPENAI_API_KEY") - -#if you have OpenAI API key as a string, enable the below - -client = Steamship(workspace="gpt-411111111") -generator = client.use_plugin('gpt-4') - -prompt = "你好,我是做客ChatBot,当前运行在OpenAI GPT-4模型,欢迎大家通过我体验GPT-4的强大。" - - -def openai_create(prompt): - task = generator.generate(text=prompt) - task.wait() - return task.output.blocks[0].text - - -def chatgpt_clone(input, history): - history = history or [] - s = list(sum(history, ())) - s.append(input) - inp = ' '.join(s) - output = openai_create(input) - output = output.replace("\n", "
") - history.append((input, output)) - return history, history - - -block = gr.Blocks() - -with block: - gr.Markdown("""""") - chatbot = gr.Chatbot() - message = gr.Textbox(placeholder=prompt, label="开聊:") - state = gr.State() - submit = gr.Button("提交") - submit.click(chatgpt_clone, - inputs=[message, state], - outputs=[chatbot, state]) - -block.launch(debug=True) \ No newline at end of file diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/models/configuration_moss.py b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/models/configuration_moss.py deleted file mode 100644 index 9bad4396ecea6578c1628732d0ef077d8964d45d..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/models/configuration_moss.py +++ /dev/null @@ -1,118 +0,0 @@ -""" Moss model configuration""" - -from transformers.utils import logging -from transformers.configuration_utils import PretrainedConfig - - -logger = logging.get_logger(__name__) - - -class MossConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`MossModel`]. It is used to instantiate a - Moss model according to the specified arguments, defining the model architecture. Instantiating a configuration - with the defaults will yield a similar configuration to that of the Moss - [fnlp/moss-moon-003-base](https://huggingface.co/fnlp/moss-moon-003-base) architecture. Configuration objects - inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from - [`PretrainedConfig`] for more information. - - Args: - vocab_size (`int`, *optional*, defaults to 107008): - Vocabulary size of the Moss model. Defines the number of different tokens that can be represented by the - `inputs_ids` passed when calling [`MossModel`]. - n_positions (`int`, *optional*, defaults to 2048): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - n_embd (`int`, *optional*, defaults to 4096): - Dimensionality of the embeddings and hidden states. - n_layer (`int`, *optional*, defaults to 28): - Number of hidden layers in the Transformer encoder. - n_head (`int`, *optional*, defaults to 16): - Number of attention heads for each attention layer in the Transformer encoder. - rotary_dim (`int`, *optional*, defaults to 64): - Number of dimensions in the embedding that Rotary Position Embedding is applied to. - n_inner (`int`, *optional*, defaults to None): - Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd - activation_function (`str`, *optional*, defaults to `"gelu_new"`): - Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. - resid_pdrop (`float`, *optional*, defaults to 0.1): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - embd_pdrop (`int`, *optional*, defaults to 0.1): - The dropout ratio for the embeddings. - attn_pdrop (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention. - layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): - The epsilon to use in the layer normalization layers. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - use_cache (`bool`, *optional*, defaults to `True`): - Whether or not the model should return the last key/values attentions (not used by all models). 
- - Example: - - ```python - >>> from modeling_moss import MossModel - >>> from configuration_moss import MossConfig - - >>> # Initializing a moss-moon-003-base configuration - >>> configuration = MossConfig() - - >>> # Initializing a model (with random weights) from the configuration - >>> model = MossModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - - model_type = "moss" - attribute_map = { - "max_position_embeddings": "n_positions", - "hidden_size": "n_embd", - "num_attention_heads": "n_head", - "num_hidden_layers": "n_layer", - } - - def __init__( - self, - vocab_size=107008, - n_positions=2048, - n_ctx=2048, - n_embd=4096, - n_layer=28, - n_head=16, - rotary_dim=64, - n_inner=None, - activation_function="gelu_new", - resid_pdrop=0.0, - embd_pdrop=0.0, - attn_pdrop=0.0, - layer_norm_epsilon=1e-5, - initializer_range=0.02, - use_cache=True, - bos_token_id=106028, - eos_token_id=106068, - tie_word_embeddings=False, - **kwargs, - ): - self.vocab_size = vocab_size - self.n_ctx = n_ctx - self.n_positions = n_positions - self.n_embd = n_embd - self.n_layer = n_layer - self.n_head = n_head - self.n_inner = n_inner - self.rotary_dim = rotary_dim - self.activation_function = activation_function - self.resid_pdrop = resid_pdrop - self.embd_pdrop = embd_pdrop - self.attn_pdrop = attn_pdrop - self.layer_norm_epsilon = layer_norm_epsilon - self.initializer_range = initializer_range - self.use_cache = use_cache - - self.bos_token_id = bos_token_id - self.eos_token_id = eos_token_id - - super().__init__( - bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs - ) diff --git a/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/text/cantonese.py b/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/text/cantonese.py deleted file mode 100644 index b66d12138b81b70b86f18217d24a08fce76305c0..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/text/cantonese.py +++ /dev/null @@ -1,59 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('jyutjyu') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ei˥'), - ('B', 'biː˥'), - ('C', 'siː˥'), - ('D', 'tiː˥'), - ('E', 'iː˥'), - ('F', 'e˥fuː˨˩'), - ('G', 'tsiː˥'), - ('H', 'ɪk̚˥tsʰyː˨˩'), - ('I', 'ɐi˥'), - ('J', 'tsei˥'), - ('K', 'kʰei˥'), - ('L', 'e˥llou˨˩'), - ('M', 'ɛːm˥'), - ('N', 'ɛːn˥'), - ('O', 'ou˥'), - ('P', 'pʰiː˥'), - ('Q', 'kʰiːu˥'), - ('R', 'aː˥lou˨˩'), - ('S', 'ɛː˥siː˨˩'), - ('T', 'tʰiː˥'), - ('U', 'juː˥'), - ('V', 'wiː˥'), - ('W', 'tʊk̚˥piː˥juː˥'), - ('X', 'ɪk̚˥siː˨˩'), - ('Y', 'waːi˥'), - ('Z', 'iː˨sɛːt̚˥') -]] - - -def number_to_cantonese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def cantonese_to_ipa(text): - text = number_to_cantonese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/Kevin676/AutoGPT/README.md b/spaces/Kevin676/AutoGPT/README.md deleted file mode 100644 index 5bf09b995f04f7af05d1314906b1b1ff39c20ddc..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AutoGPT -emoji: 🦾 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.27.0 -app_file: ui/app.py -pinned: false -license: mit -duplicated_from: aliabid94/AutoGPT ---- - diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/data_objects/speaker_verification_dataset.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/data_objects/speaker_verification_dataset.py deleted file mode 100644 index 77a6e05eae6a939ae7575ae70b7173644141fffe..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/data_objects/speaker_verification_dataset.py +++ /dev/null @@ -1,56 +0,0 @@ -from encoder.data_objects.random_cycler import RandomCycler -from encoder.data_objects.speaker_batch import SpeakerBatch -from encoder.data_objects.speaker import Speaker -from encoder.params_data import partials_n_frames -from torch.utils.data import Dataset, DataLoader -from pathlib import Path - -# TODO: improve with a pool of speakers for data efficiency - -class SpeakerVerificationDataset(Dataset): - def __init__(self, datasets_root: Path): - self.root = datasets_root - speaker_dirs = [f for f in self.root.glob("*") if f.is_dir()] - if len(speaker_dirs) == 0: - raise Exception("No speakers found. Make sure you are pointing to the directory " - "containing all preprocessed speaker directories.") - self.speakers = [Speaker(speaker_dir) for speaker_dir in speaker_dirs] - self.speaker_cycler = RandomCycler(self.speakers) - - def __len__(self): - return int(1e10) - - def __getitem__(self, index): - return next(self.speaker_cycler) - - def get_logs(self): - log_string = "" - for log_fpath in self.root.glob("*.txt"): - with log_fpath.open("r") as log_file: - log_string += "".join(log_file.readlines()) - return log_string - - -class SpeakerVerificationDataLoader(DataLoader): - def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None, - batch_sampler=None, num_workers=0, pin_memory=False, timeout=0, - worker_init_fn=None): - self.utterances_per_speaker = utterances_per_speaker - - super().__init__( - dataset=dataset, - batch_size=speakers_per_batch, - shuffle=False, - sampler=sampler, - batch_sampler=batch_sampler, - num_workers=num_workers, - collate_fn=self.collate, - pin_memory=pin_memory, - drop_last=False, - timeout=timeout, - worker_init_fn=worker_init_fn - ) - - def collate(self, speakers): - return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames) - \ No newline at end of file diff --git a/spaces/KeyDev/NOC-classification/app.py b/spaces/KeyDev/NOC-classification/app.py deleted file mode 100644 index fced8846b9b730030ff3059c124c2857ec7fc104..0000000000000000000000000000000000000000 --- a/spaces/KeyDev/NOC-classification/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/google/flan-t5-xxl").launch() \ No newline at end of file diff --git a/spaces/KonradSzafer/HF-QA-Demo/tests/__init__.py b/spaces/KonradSzafer/HF-QA-Demo/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/KyanChen/BuildingExtraction/Train.py 
b/spaces/KyanChen/BuildingExtraction/Train.py deleted file mode 100644 index 8a34e02dafd1ed55a686e90e43dc0e031cd35986..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/BuildingExtraction/Train.py +++ /dev/null @@ -1,185 +0,0 @@ -import os -# Change the numbers when you want to train with specific gpus -# os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3' -import torch -from STTNet import STTNet -import torch.nn.functional as F -from Utils.Datasets import get_data_loader -from Utils.Utils import make_numpy_img, inv_normalize_img, encode_onehot_to_mask, get_metrics, Logger -import matplotlib.pyplot as plt -import numpy as np -from collections import OrderedDict -from torch.optim.lr_scheduler import MultiStepLR - -if __name__ == '__main__': - model_infos = { - # vgg16_bn, resnet50, resnet18 - 'backbone': 'resnet50', - 'pretrained': True, - 'out_keys': ['block4'], - 'in_channel': 3, - 'n_classes': 2, - 'top_k_s': 64, - 'top_k_c': 16, - 'encoder_pos': True, - 'decoder_pos': True, - 'model_pattern': ['X', 'A', 'S', 'C'], - - 'BATCH_SIZE': 8, - 'IS_SHUFFLE': True, - 'NUM_WORKERS': 0, - 'DATASET': 'Tools/generate_dep_info/train_data.csv', - 'model_path': 'Checkpoints', - 'log_path': 'Results', - # if you need the validation process. - 'IS_VAL': True, - 'VAL_BATCH_SIZE': 4, - 'VAL_DATASET': 'Tools/generate_dep_info/val_data.csv', - # if you need the test process. - 'IS_TEST': True, - 'TEST_DATASET': 'Tools/generate_dep_info/test_data.csv', - 'IMG_SIZE': [512, 512], - 'PHASE': 'seg', - - # INRIA Dataset - 'PRIOR_MEAN': [0.40672500537632994, 0.42829032416229895, 0.39331840468605667], - 'PRIOR_STD': [0.029498464618176873, 0.027740088491668233, 0.028246722411879095], - # # # WHU Dataset - # 'PRIOR_MEAN': [0.4352682576428411, 0.44523221318154493, 0.41307610541534784], - # 'PRIOR_STD': [0.026973196780331585, 0.026424642808887323, 0.02791246590291434], - - # if you want to load state dict - 'load_checkpoint_path': r'E:\BuildingExtractionDataset\INRIA_ckpt_latest.pt', - # if you want to resume a checkpoint - 'resume_checkpoint_path': '', - - } - os.makedirs(model_infos['model_path'], exist_ok=True) - if model_infos['IS_VAL']: - os.makedirs(model_infos['log_path']+'/val', exist_ok=True) - if model_infos['IS_TEST']: - os.makedirs(model_infos['log_path']+'/test', exist_ok=True) - logger = Logger(model_infos['log_path'] + '/log.log') - - data_loaders = get_data_loader(model_infos) - loss_weight = 0.1 - model = STTNet(**model_infos) - - epoch_start = 0 - if model_infos['load_checkpoint_path'] is not None and os.path.exists(model_infos['load_checkpoint_path']): - logger.write(f'load checkpoint from {model_infos["load_checkpoint_path"]}\n') - state_dict = torch.load(model_infos['load_checkpoint_path'], map_location='cpu') - model_dict = state_dict['model_state_dict'] - try: - model_dict = OrderedDict({k.replace('module.', ''): v for k, v in model_dict.items()}) - model.load_state_dict(model_dict) - except Exception as e: - model.load_state_dict(model_dict) - if model_infos['resume_checkpoint_path'] is not None and os.path.exists(model_infos['resume_checkpoint_path']): - logger.write(f'resume checkpoint path from {model_infos["resume_checkpoint_path"]}\n') - state_dict = torch.load(model_infos['resume_checkpoint_path'], map_location='cpu') - epoch_start = state_dict['epoch_id'] - model_dict = state_dict['model_state_dict'] - logger.write(f'resume checkpoint from epoch {epoch_start}\n') - try: - model_dict = OrderedDict({k.replace('module.', ''): v for k, v in model_dict.items()}) - 
model.load_state_dict(model_dict) - except Exception as e: - model.load_state_dict(model_dict) - model = model.cuda() - device_ids = range(torch.cuda.device_count()) - if len(device_ids) > 1: - model = torch.nn.DataParallel(model, device_ids=device_ids) - logger.write(f'Use GPUs: {device_ids}\n') - else: - logger.write(f'Use GPUs: 1\n') - optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4) - max_epoch = 300 - scheduler = MultiStepLR(optimizer, [int(max_epoch*2/3), int(max_epoch*5/6)], 0.5) - - for epoch_id in range(epoch_start, max_epoch): - pattern = 'train' - model.train() # Set model to training mode - for batch_id, batch in enumerate(data_loaders[pattern]): - # Get data - img_batch = batch['img'].cuda() - label_batch = batch['label'].cuda() - - # inference - optimizer.zero_grad() - logits, att_branch_output = model(img_batch) - - # compute loss - label_downs = F.interpolate(label_batch, att_branch_output.size()[2:], mode='nearest') - loss_branch = F.binary_cross_entropy_with_logits(att_branch_output, label_downs) - loss_master = F.binary_cross_entropy_with_logits(logits, label_batch) - loss = loss_master + loss_weight * loss_branch - # loss backward - loss.backward() - optimizer.step() - - if batch_id % 20 == 1: - logger.write( - f'{pattern}: {epoch_id}/{max_epoch} {batch_id}/{len(data_loaders[pattern])} loss: {loss.item():.4f}\n') - - scheduler.step() - patterns = ['val', 'test'] - for pattern_id, is_pattern in enumerate([model_infos['IS_VAL'], model_infos['IS_TEST']]): - if is_pattern: - # pred: logits, tensor, nBatch * nClass * W * H - # target: labels, tensor, nBatch * nClass * W * H - # output, batch['label'] - collect_result = {'pred': [], 'target': []} - pattern = patterns[pattern_id] - model.eval() - for batch_id, batch in enumerate(data_loaders[pattern]): - # Get data - img_batch = batch['img'].cuda() - label_batch = batch['label'].cuda() - img_names = batch['img_name'] - collect_result['target'].append(label_batch.data.cpu()) - - # inference - with torch.no_grad(): - logits, att_branch_output = model(img_batch) - - collect_result['pred'].append(logits.data.cpu()) - # get segmentation result, when the phase is test. 
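- # argmax over the class dimension yields the per-pixel class id (0 = background, - # 1 = building); scaling by 255 maps class 1 to white pixels for visualization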
- pred_label = torch.argmax(logits, 1) - pred_label *= 255 - - if pattern == 'test' or batch_id % 5 == 1: - batch_size = pred_label.size(0) - # k = np.clip(int(0.3 * batch_size), a_min=1, a_max=batch_size) - # ids = np.random.choice(range(batch_size), k, replace=False) - ids = range(batch_size) - for img_id in ids: - img = img_batch[img_id].detach().cpu() - target = label_batch[img_id].detach().cpu() - pred = pred_label[img_id].detach().cpu() - img_name = img_names[img_id] - - img = make_numpy_img( - inv_normalize_img(img, model_infos['PRIOR_MEAN'], model_infos['PRIOR_STD'])) - target = make_numpy_img(encode_onehot_to_mask(target)) * 255 - pred = make_numpy_img(pred) - - vis = np.concatenate([img / 255., target / 255., pred / 255.], axis=0) - vis = np.clip(vis, a_min=0, a_max=1) - file_name = os.path.join(model_infos['log_path'], pattern, f'Epoch_{epoch_id}_{img_name.split(".")[0]}.png') - plt.imsave(file_name, vis) - - collect_result['pred'] = torch.cat(collect_result['pred'], dim=0) - collect_result['target'] = torch.cat(collect_result['target'], dim=0) - IoU, OA, F1_score = get_metrics('seg', **collect_result) - logger.write(f'{pattern}: {epoch_id}/{max_epoch} Iou:{IoU[-1]:.4f} OA:{OA[-1]:.4f} F1:{F1_score[-1]:.4f}\n') - if epoch_id % 20 == 1: - torch.save({ - 'epoch_id': epoch_id, - 'model_state_dict': model.state_dict() - }, os.path.join(model_infos['model_path'], f'ckpt_{epoch_id}.pt')) - torch.save({ - 'epoch_id': epoch_id, - 'model_state_dict': model.state_dict() - }, os.path.join(model_infos['model_path'], f'ckpt_latest.pt')) - diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/hourglass.py b/spaces/KyanChen/RSPrompter/mmdet/models/backbones/hourglass.py deleted file mode 100644 index bb58799f7b32138b3f58383419ddce9aa6d5ca18..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/hourglass.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Sequence - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmengine.model import BaseModule - -from mmdet.registry import MODELS -from mmdet.utils import ConfigType, OptMultiConfig -from ..layers import ResLayer -from .resnet import BasicBlock - - -class HourglassModule(BaseModule): - """Hourglass Module for HourglassNet backbone. - - Generate module recursively and use BasicBlock as the base unit. - - Args: - depth (int): Depth of current HourglassModule. - stage_channels (list[int]): Feature channels of sub-modules in current - and follow-up HourglassModule. - stage_blocks (list[int]): Number of sub-modules stacked in current and - follow-up HourglassModule. - norm_cfg (ConfigType): Dictionary to construct and config norm layer. - Defaults to `dict(type='BN', requires_grad=True)` - upsample_cfg (ConfigType): Config dict for interpolate layer. - Defaults to `dict(mode='nearest')` - init_cfg (dict or ConfigDict, optional): the config to control the - initialization. 
- """ - - def __init__(self, - depth: int, - stage_channels: List[int], - stage_blocks: List[int], - norm_cfg: ConfigType = dict(type='BN', requires_grad=True), - upsample_cfg: ConfigType = dict(mode='nearest'), - init_cfg: OptMultiConfig = None) -> None: - super().__init__(init_cfg) - - self.depth = depth - - cur_block = stage_blocks[0] - next_block = stage_blocks[1] - - cur_channel = stage_channels[0] - next_channel = stage_channels[1] - - self.up1 = ResLayer( - BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg) - - self.low1 = ResLayer( - BasicBlock, - cur_channel, - next_channel, - cur_block, - stride=2, - norm_cfg=norm_cfg) - - if self.depth > 1: - self.low2 = HourglassModule(depth - 1, stage_channels[1:], - stage_blocks[1:]) - else: - self.low2 = ResLayer( - BasicBlock, - next_channel, - next_channel, - next_block, - norm_cfg=norm_cfg) - - self.low3 = ResLayer( - BasicBlock, - next_channel, - cur_channel, - cur_block, - norm_cfg=norm_cfg, - downsample_first=False) - - self.up2 = F.interpolate - self.upsample_cfg = upsample_cfg - - def forward(self, x: torch.Tensor) -> nn.Module: - """Forward function.""" - up1 = self.up1(x) - low1 = self.low1(x) - low2 = self.low2(low1) - low3 = self.low3(low2) - # Fixing `scale factor` (e.g. 2) is common for upsampling, but - # in some cases the spatial size is mismatched and error will arise. - if 'scale_factor' in self.upsample_cfg: - up2 = self.up2(low3, **self.upsample_cfg) - else: - shape = up1.shape[2:] - up2 = self.up2(low3, size=shape, **self.upsample_cfg) - return up1 + up2 - - -@MODELS.register_module() -class HourglassNet(BaseModule): - """HourglassNet backbone. - - Stacked Hourglass Networks for Human Pose Estimation. - More details can be found in the `paper - `_ . - - Args: - downsample_times (int): Downsample times in a HourglassModule. - num_stacks (int): Number of HourglassModule modules stacked, - 1 for Hourglass-52, 2 for Hourglass-104. - stage_channels (Sequence[int]): Feature channel of each sub-module in a - HourglassModule. - stage_blocks (Sequence[int]): Number of sub-modules stacked in a - HourglassModule. - feat_channel (int): Feature channel of conv after a HourglassModule. - norm_cfg (norm_cfg): Dictionary to construct and config norm layer. - init_cfg (dict or ConfigDict, optional): the config to control the - initialization. - - Example: - >>> from mmdet.models import HourglassNet - >>> import torch - >>> self = HourglassNet() - >>> self.eval() - >>> inputs = torch.rand(1, 3, 511, 511) - >>> level_outputs = self.forward(inputs) - >>> for level_output in level_outputs: - ... 
print(tuple(level_output.shape)) - (1, 256, 128, 128) - (1, 256, 128, 128) - """ - - def __init__(self, - downsample_times: int = 5, - num_stacks: int = 2, - stage_channels: Sequence = (256, 256, 384, 384, 384, 512), - stage_blocks: Sequence = (2, 2, 2, 2, 2, 4), - feat_channel: int = 256, - norm_cfg: ConfigType = dict(type='BN', requires_grad=True), - init_cfg: OptMultiConfig = None) -> None: - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super().__init__(init_cfg) - - self.num_stacks = num_stacks - assert self.num_stacks >= 1 - assert len(stage_channels) == len(stage_blocks) - assert len(stage_channels) > downsample_times - - cur_channel = stage_channels[0] - - self.stem = nn.Sequential( - ConvModule( - 3, cur_channel // 2, 7, padding=3, stride=2, - norm_cfg=norm_cfg), - ResLayer( - BasicBlock, - cur_channel // 2, - cur_channel, - 1, - stride=2, - norm_cfg=norm_cfg)) - - self.hourglass_modules = nn.ModuleList([ - HourglassModule(downsample_times, stage_channels, stage_blocks) - for _ in range(num_stacks) - ]) - - self.inters = ResLayer( - BasicBlock, - cur_channel, - cur_channel, - num_stacks - 1, - norm_cfg=norm_cfg) - - self.conv1x1s = nn.ModuleList([ - ConvModule( - cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) - for _ in range(num_stacks - 1) - ]) - - self.out_convs = nn.ModuleList([ - ConvModule( - cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) - for _ in range(num_stacks) - ]) - - self.remap_convs = nn.ModuleList([ - ConvModule( - feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) - for _ in range(num_stacks - 1) - ]) - - self.relu = nn.ReLU(inplace=True) - - def init_weights(self) -> None: - """Init module weights.""" - # Training Centripetal Model needs to reset parameters for Conv2d - super().init_weights() - for m in self.modules(): - if isinstance(m, nn.Conv2d): - m.reset_parameters() - - def forward(self, x: torch.Tensor) -> List[torch.Tensor]: - """Forward function.""" - inter_feat = self.stem(x) - out_feats = [] - - for ind in range(self.num_stacks): - single_hourglass = self.hourglass_modules[ind] - out_conv = self.out_convs[ind] - - hourglass_feat = single_hourglass(inter_feat) - out_feat = out_conv(hourglass_feat) - out_feats.append(out_feat) - - if ind < self.num_stacks - 1: - inter_feat = self.conv1x1s[ind]( - inter_feat) + self.remap_convs[ind]( - out_feat) - inter_feat = self.inters[ind](self.relu(inter_feat)) - - return out_feats diff --git a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/src/vision.cpp b/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/src/vision.cpp deleted file mode 100644 index 4a08821e0121a77556aa7a263ec8ebfa928b13b6..0000000000000000000000000000000000000000 --- a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/src/vision.cpp +++ /dev/null @@ -1,21 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -/*! -* Copyright (c) Facebook, Inc. and its affiliates. 
-* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR -*/ - -#include "ms_deform_attn.h" - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward"); - m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward"); -} diff --git a/spaces/Lightxr/sd-diffusers-webui/modules/model.py b/spaces/Lightxr/sd-diffusers-webui/modules/model.py deleted file mode 100644 index 70fa00ee4b52f1c9ad7cc10c52c201b64ceb5fd8..0000000000000000000000000000000000000000 --- a/spaces/Lightxr/sd-diffusers-webui/modules/model.py +++ /dev/null @@ -1,897 +0,0 @@ -import importlib -import inspect -import math -from pathlib import Path -import re -from collections import defaultdict -from typing import List, Optional, Union - -import time -import k_diffusion -import numpy as np -import PIL -import torch -import torch.nn as nn -import torch.nn.functional as F -from einops import rearrange -from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser -from modules.prompt_parser import FrozenCLIPEmbedderWithCustomWords -from torch import einsum -from torch.autograd.function import Function - -from diffusers import DiffusionPipeline -from diffusers.utils import PIL_INTERPOLATION, is_accelerate_available -from diffusers.utils import logging, randn_tensor - -import modules.safe as _ -from safetensors.torch import load_file - -xformers_available = False -try: - import xformers - - xformers_available = True -except ImportError: - pass - -EPSILON = 1e-6 -exists = lambda val: val is not None -default = lambda val, d: val if exists(val) else d -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -def get_attention_scores(attn, query, key, attention_mask=None): - - if attn.upcast_attention: - query = query.float() - key = key.float() - - attention_scores = torch.baddbmm( - torch.empty( - query.shape[0], - query.shape[1], - key.shape[1], - dtype=query.dtype, - device=query.device, - ), - query, - key.transpose(-1, -2), - beta=0, - alpha=attn.scale, - ) - - if attention_mask is not None: - attention_scores = attention_scores + attention_mask - - if attn.upcast_softmax: - attention_scores = attention_scores.float() - - return attention_scores - - -class CrossAttnProcessor(nn.Module): - def __call__( - self, - attn, - hidden_states, - encoder_hidden_states=None, - attention_mask=None, - ): - batch_size, sequence_length, _ = hidden_states.shape - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length) - - encoder_states = hidden_states - is_xattn = False - if encoder_hidden_states is not None: - is_xattn = True - img_state = encoder_hidden_states["img_state"] - encoder_states = encoder_hidden_states["states"] - weight_func = encoder_hidden_states["weight_func"] - sigma = encoder_hidden_states["sigma"] - - query = attn.to_q(hidden_states) - key = attn.to_k(encoder_states) - value = attn.to_v(encoder_states) - - query = attn.head_to_batch_dim(query) - key = attn.head_to_batch_dim(key) - value = attn.head_to_batch_dim(value) - - if is_xattn and isinstance(img_state, dict): - # use torch.baddbmm method (slow) - attention_scores = get_attention_scores(attn, query, key, attention_mask) - w = img_state[sequence_length].to(query.device) - cross_attention_weight = weight_func(w, sigma, attention_scores) - attention_scores += torch.repeat_interleave( - cross_attention_weight, repeats=attn.heads, dim=0 - ) - - # calc probs - attention_probs = attention_scores.softmax(dim=-1) - 
attention_probs = attention_probs.to(query.dtype) - hidden_states = torch.bmm(attention_probs, value) - - elif xformers_available: - hidden_states = xformers.ops.memory_efficient_attention( - query.contiguous(), - key.contiguous(), - value.contiguous(), - attn_bias=attention_mask, - ) - hidden_states = hidden_states.to(query.dtype) - - else: - q_bucket_size = 512 - k_bucket_size = 1024 - - # use flash-attention - hidden_states = FlashAttentionFunction.apply( - query.contiguous(), - key.contiguous(), - value.contiguous(), - attention_mask, - False, - q_bucket_size, - k_bucket_size, - ) - hidden_states = hidden_states.to(query.dtype) - - hidden_states = attn.batch_to_head_dim(hidden_states) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - - # dropout - hidden_states = attn.to_out[1](hidden_states) - - return hidden_states - -class ModelWrapper: - def __init__(self, model, alphas_cumprod): - self.model = model - self.alphas_cumprod = alphas_cumprod - - def apply_model(self, *args, **kwargs): - if len(args) == 3: - encoder_hidden_states = args[-1] - args = args[:2] - if kwargs.get("cond", None) is not None: - encoder_hidden_states = kwargs.pop("cond") - return self.model( - *args, encoder_hidden_states=encoder_hidden_states, **kwargs - ).sample - - -class StableDiffusionPipeline(DiffusionPipeline): - - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae, - text_encoder, - tokenizer, - unet, - scheduler, - ): - super().__init__() - - # get correct sigmas from LMS - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - ) - self.setup_unet(self.unet) - self.setup_text_encoder() - - def setup_text_encoder(self, n=1, new_encoder=None): - if new_encoder is not None: - self.text_encoder = new_encoder - - self.prompt_parser = FrozenCLIPEmbedderWithCustomWords(self.tokenizer, self.text_encoder) - self.prompt_parser.CLIP_stop_at_last_layers = n - - def setup_unet(self, unet): - unet = unet.to(self.device) - model = ModelWrapper(unet, self.scheduler.alphas_cumprod) - if self.scheduler.prediction_type == "v_prediction": - self.k_diffusion_model = CompVisVDenoiser(model) - else: - self.k_diffusion_model = CompVisDenoiser(model) - - def get_scheduler(self, scheduler_type: str): - library = importlib.import_module("k_diffusion") - sampling = getattr(library, "sampling") - return getattr(sampling, scheduler_type) - - def encode_sketchs(self, state, scale_ratio=8, g_strength=1.0, text_ids=None): - uncond, cond = text_ids[0], text_ids[1] - - img_state = [] - if state is None: - return torch.FloatTensor(0) - - for k, v in state.items(): - if v["map"] is None: - continue - - v_input = self.tokenizer( - k, - max_length=self.tokenizer.model_max_length, - truncation=True, - add_special_tokens=False, - ).input_ids - - dotmap = v["map"] < 255 - out = dotmap.astype(float) - if v["mask_outsides"]: - out[out==0] = -1 - - arr = torch.from_numpy( - out * float(v["weight"]) * g_strength - ) - img_state.append((v_input, arr)) - - if len(img_state) == 0: - return torch.FloatTensor(0) - - w_tensors = dict() - cond = cond.tolist() - uncond = uncond.tolist() - for layer in self.unet.down_blocks: - c = int(len(cond)) - w, h = img_state[0][1].shape - w_r, h_r = w // scale_ratio, h // scale_ratio - - ret_cond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32) - ret_uncond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32) - - for v_as_tokens, img_where_color in img_state: 
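- # downsample each sketch map to this attention layer's resolution, then add its - # weight to every token column where the phrase's token ids appear in the prompt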
- is_in = 0 - - ret = ( - F.interpolate( - img_where_color.unsqueeze(0).unsqueeze(1), - scale_factor=1 / scale_ratio, - mode="bilinear", - align_corners=True, - ) - .squeeze() - .reshape(-1, 1) - .repeat(1, len(v_as_tokens)) - ) - - for idx, tok in enumerate(cond): - if cond[idx : idx + len(v_as_tokens)] == v_as_tokens: - is_in = 1 - ret_cond_tensor[0, :, idx : idx + len(v_as_tokens)] += ret - - for idx, tok in enumerate(uncond): - if uncond[idx : idx + len(v_as_tokens)] == v_as_tokens: - is_in = 1 - ret_uncond_tensor[0, :, idx : idx + len(v_as_tokens)] += ret - - if not is_in == 1: - print(f"tokens {v_as_tokens} not found in text") - - w_tensors[w_r * h_r] = torch.cat([ret_uncond_tensor, ret_cond_tensor]) - scale_ratio *= 2 - - return w_tensors - - def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): - r""" - Enable sliced attention computation. - - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - - Args: - slice_size (`str` or `int`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, - `attention_head_dim` must be a multiple of `slice_size`. - """ - if slice_size == "auto": - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = self.unet.config.attention_head_dim // 2 - self.unet.set_attention_slice(slice_size) - - def disable_attention_slicing(self): - r""" - Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go - back to computing attention in one step. - """ - # set slice_size = `None` to disable `attention slicing` - self.enable_attention_slicing(None) - - def enable_sequential_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, - text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a - `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. - """ - if is_accelerate_available(): - from accelerate import cpu_offload - else: - raise ImportError("Please install accelerate via `pip install accelerate`") - - device = torch.device(f"cuda:{gpu_id}") - - for cpu_offloaded_model in [ - self.unet, - self.text_encoder, - self.vae, - self.safety_checker, - ]: - if cpu_offloaded_model is not None: - cpu_offload(cpu_offloaded_model, device) - - @property - def _execution_device(self): - r""" - Returns the device on which the pipeline's models will be executed. After calling - `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module - hooks. 
- """ - if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): - return self.device - for module in self.unet.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - return self.device - - def decode_latents(self, latents): - latents = latents.to(self.device, dtype=self.vae.dtype) - latents = 1 / 0.18215 * latents - image = self.vae.decode(latents).sample - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - def check_inputs(self, prompt, height, width, callback_steps): - if not isinstance(prompt, str) and not isinstance(prompt, list): - raise ValueError( - f"`prompt` has to be of type `str` or `list` but is {type(prompt)}" - ) - - if height % 8 != 0 or width % 8 != 0: - raise ValueError( - f"`height` and `width` have to be divisible by 8 but are {height} and {width}." - ) - - if (callback_steps is None) or ( - callback_steps is not None - and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - def prepare_latents( - self, - batch_size, - num_channels_latents, - height, - width, - dtype, - device, - generator, - latents=None, - ): - shape = (batch_size, num_channels_latents, height // 8, width // 8) - if latents is None: - if device.type == "mps": - # randn does not work reproducibly on mps - latents = torch.randn( - shape, generator=generator, device="cpu", dtype=dtype - ).to(device) - else: - latents = torch.randn( - shape, generator=generator, device=device, dtype=dtype - ) - else: - # if latents.shape != shape: - # raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - return latents - - def preprocess(self, image): - if isinstance(image, torch.Tensor): - return image - elif isinstance(image, PIL.Image.Image): - image = [image] - - if isinstance(image[0], PIL.Image.Image): - w, h = image[0].size - w, h = map(lambda x: x - x % 8, (w, h)) # resize to integer multiple of 8 - - image = [ - np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[ - None, : - ] - for i in image - ] - image = np.concatenate(image, axis=0) - image = np.array(image).astype(np.float32) / 255.0 - image = image.transpose(0, 3, 1, 2) - image = 2.0 * image - 1.0 - image = torch.from_numpy(image) - elif isinstance(image[0], torch.Tensor): - image = torch.cat(image, dim=0) - return image - - @torch.no_grad() - def img2img( - self, - prompt: Union[str, List[str]], - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - generator: Optional[torch.Generator] = None, - image: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - latents=None, - strength=1.0, - pww_state=None, - pww_attn_weight=1.0, - sampler_name="", - sampler_opt={}, - start_time=-1, - timeout=180, - scale_ratio=8.0, - ): - sampler = self.get_scheduler(sampler_name) - if image is not None: - image = self.preprocess(image) - image = image.to(self.vae.device, dtype=self.vae.dtype) - - init_latents = 
self.vae.encode(image).latent_dist.sample(generator) - latents = 0.18215 * init_latents - - # 2. Define call parameters - batch_size = 1 if isinstance(prompt, str) else len(prompt) - device = self._execution_device - latents = latents.to(device, dtype=self.unet.dtype) - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = True - if guidance_scale <= 1.0: - raise ValueError("has to use guidance_scale") - - # 3. Encode input prompt - text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt]) - text_embeddings = text_embeddings.to(self.unet.dtype) - - init_timestep = ( - int(num_inference_steps / min(strength, 0.999)) if strength > 0 else 0 - ) - sigmas = self.get_sigmas(init_timestep, sampler_opt).to( - text_embeddings.device, dtype=text_embeddings.dtype - ) - - t_start = max(init_timestep - num_inference_steps, 0) - sigma_sched = sigmas[t_start:] - - noise = randn_tensor( - latents.shape, - generator=generator, - device=device, - dtype=text_embeddings.dtype, - ) - latents = latents.to(device) - latents = latents + noise * sigma_sched[0] - - # 5. Prepare latent variables - self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) - self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to( - latents.device - ) - - img_state = self.encode_sketchs( - pww_state, - g_strength=pww_attn_weight, - text_ids=text_ids, - ) - - def model_fn(x, sigma): - - if start_time > 0 and timeout > 0: - assert (time.time() - start_time) < timeout, "inference process timed out" - - latent_model_input = torch.cat([x] * 2) - weight_func = lambda w, sigma, qk: w * math.log(1 + sigma) * qk.max() - encoder_state = { - "img_state": img_state, - "states": text_embeddings, - "sigma": sigma[0], - "weight_func": weight_func, - } - - noise_pred = self.k_diffusion_model( - latent_model_input, sigma, cond=encoder_state - ) - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * ( - noise_pred_text - noise_pred_uncond - ) - return noise_pred - - sampler_args = self.get_sampler_extra_args_i2i(sigma_sched, sampler) - latents = sampler(model_fn, latents, **sampler_args) - - # 8. Post-processing - image = self.decode_latents(latents) - - # 10. 
Convert to PIL - if output_type == "pil": - image = self.numpy_to_pil(image) - - return (image,) - - def get_sigmas(self, steps, params): - discard_next_to_last_sigma = params.get("discard_next_to_last_sigma", False) - steps += 1 if discard_next_to_last_sigma else 0 - - if params.get("scheduler", None) == "karras": - sigma_min, sigma_max = ( - self.k_diffusion_model.sigmas[0].item(), - self.k_diffusion_model.sigmas[-1].item(), - ) - sigmas = k_diffusion.sampling.get_sigmas_karras( - n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=self.device - ) - else: - sigmas = self.k_diffusion_model.get_sigmas(steps) - - if discard_next_to_last_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - - return sigmas - - # https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/48a15821de768fea76e66f26df83df3fddf18f4b/modules/sd_samplers.py#L454 - def get_sampler_extra_args_t2i(self, sigmas, eta, steps, func): - extra_params_kwargs = {} - - if "eta" in inspect.signature(func).parameters: - extra_params_kwargs["eta"] = eta - - if "sigma_min" in inspect.signature(func).parameters: - extra_params_kwargs["sigma_min"] = sigmas[0].item() - extra_params_kwargs["sigma_max"] = sigmas[-1].item() - - if "n" in inspect.signature(func).parameters: - extra_params_kwargs["n"] = steps - else: - extra_params_kwargs["sigmas"] = sigmas - - return extra_params_kwargs - - # https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/48a15821de768fea76e66f26df83df3fddf18f4b/modules/sd_samplers.py#L454 - def get_sampler_extra_args_i2i(self, sigmas, func): - extra_params_kwargs = {} - - if "sigma_min" in inspect.signature(func).parameters: - ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last - extra_params_kwargs["sigma_min"] = sigmas[-2] - - if "sigma_max" in inspect.signature(func).parameters: - extra_params_kwargs["sigma_max"] = sigmas[0] - - if "n" in inspect.signature(func).parameters: - extra_params_kwargs["n"] = len(sigmas) - 1 - - if "sigma_sched" in inspect.signature(func).parameters: - extra_params_kwargs["sigma_sched"] = sigmas - - if "sigmas" in inspect.signature(func).parameters: - extra_params_kwargs["sigmas"] = sigmas - - return extra_params_kwargs - - @torch.no_grad() - def txt2img( - self, - prompt: Union[str, List[str]], - height: int = 512, - width: int = 512, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - eta: float = 0.0, - generator: Optional[torch.Generator] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - callback_steps: Optional[int] = 1, - upscale=False, - upscale_x: float = 2.0, - upscale_method: str = "bicubic", - upscale_antialias: bool = False, - upscale_denoising_strength: int = 0.7, - pww_state=None, - pww_attn_weight=1.0, - sampler_name="", - sampler_opt={}, - start_time=-1, - timeout=180, - ): - sampler = self.get_scheduler(sampler_name) - # 1. Check inputs. Raise error if not correct - self.check_inputs(prompt, height, width, callback_steps) - - # 2. Define call parameters - batch_size = 1 if isinstance(prompt, str) else len(prompt) - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = True - if guidance_scale <= 1.0: - raise ValueError("has to use guidance_scale") - - # 3. 
Encode input prompt - text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt]) - text_embeddings = text_embeddings.to(self.unet.dtype) - - # 4. Prepare timesteps - sigmas = self.get_sigmas(num_inference_steps, sampler_opt).to( - text_embeddings.device, dtype=text_embeddings.dtype - ) - - # 5. Prepare latent variables - num_channels_latents = self.unet.in_channels - latents = self.prepare_latents( - batch_size, - num_channels_latents, - height, - width, - text_embeddings.dtype, - device, - generator, - latents, - ) - latents = latents * sigmas[0] - self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) - self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to( - latents.device - ) - - img_state = self.encode_sketchs( - pww_state, - g_strength=pww_attn_weight, - text_ids=text_ids, - ) - - def model_fn(x, sigma): - - if start_time > 0 and timeout > 0: - assert (time.time() - start_time) < timeout, "inference process timed out" - - latent_model_input = torch.cat([x] * 2) - weight_func = lambda w, sigma, qk: w * math.log(1 + sigma) * qk.max() - encoder_state = { - "img_state": img_state, - "states": text_embeddings, - "sigma": sigma[0], - "weight_func": weight_func, - } - - noise_pred = self.k_diffusion_model( - latent_model_input, sigma, cond=encoder_state - ) - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * ( - noise_pred_text - noise_pred_uncond - ) - return noise_pred - - extra_args = self.get_sampler_extra_args_t2i( - sigmas, eta, num_inference_steps, sampler - ) - latents = sampler(model_fn, latents, **extra_args) - - if upscale: - target_height = height * upscale_x - target_width = width * upscale_x - vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - latents = torch.nn.functional.interpolate( - latents, - size=( - int(target_height // vae_scale_factor), - int(target_width // vae_scale_factor), - ), - mode=upscale_method, - antialias=upscale_antialias, - ) - return self.img2img( - prompt=prompt, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - negative_prompt=negative_prompt, - generator=generator, - latents=latents, - strength=upscale_denoising_strength, - sampler_name=sampler_name, - sampler_opt=sampler_opt, - pww_state=None, - pww_attn_weight=pww_attn_weight / 2, - ) - - # 8. Post-processing - image = self.decode_latents(latents) - - # 10. 
Convert to PIL - if output_type == "pil": - image = self.numpy_to_pil(image) - - return (image,) - - -class FlashAttentionFunction(Function): - @staticmethod - @torch.no_grad() - def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size): - """Algorithm 2 in the paper""" - - device = q.device - max_neg_value = -torch.finfo(q.dtype).max - qk_len_diff = max(k.shape[-2] - q.shape[-2], 0) - - o = torch.zeros_like(q) - all_row_sums = torch.zeros((*q.shape[:-1], 1), device=device) - all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, device=device) - - scale = q.shape[-1] ** -0.5 - - if not exists(mask): - mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size) - else: - mask = rearrange(mask, "b n -> b 1 1 n") - mask = mask.split(q_bucket_size, dim=-1) - - row_splits = zip( - q.split(q_bucket_size, dim=-2), - o.split(q_bucket_size, dim=-2), - mask, - all_row_sums.split(q_bucket_size, dim=-2), - all_row_maxes.split(q_bucket_size, dim=-2), - ) - - for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits): - q_start_index = ind * q_bucket_size - qk_len_diff - - col_splits = zip( - k.split(k_bucket_size, dim=-2), - v.split(k_bucket_size, dim=-2), - ) - - for k_ind, (kc, vc) in enumerate(col_splits): - k_start_index = k_ind * k_bucket_size - - attn_weights = einsum("... i d, ... j d -> ... i j", qc, kc) * scale - - if exists(row_mask): - attn_weights.masked_fill_(~row_mask, max_neg_value) - - if causal and q_start_index < (k_start_index + k_bucket_size - 1): - causal_mask = torch.ones( - (qc.shape[-2], kc.shape[-2]), dtype=torch.bool, device=device - ).triu(q_start_index - k_start_index + 1) - attn_weights.masked_fill_(causal_mask, max_neg_value) - - block_row_maxes = attn_weights.amax(dim=-1, keepdims=True) - attn_weights -= block_row_maxes - exp_weights = torch.exp(attn_weights) - - if exists(row_mask): - exp_weights.masked_fill_(~row_mask, 0.0) - - block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp( - min=EPSILON - ) - - new_row_maxes = torch.maximum(block_row_maxes, row_maxes) - - exp_values = einsum("... i j, ... j d -> ... 
i d", exp_weights, vc) - - exp_row_max_diff = torch.exp(row_maxes - new_row_maxes) - exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes) - - new_row_sums = ( - exp_row_max_diff * row_sums - + exp_block_row_max_diff * block_row_sums - ) - - oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_( - (exp_block_row_max_diff / new_row_sums) * exp_values - ) - - row_maxes.copy_(new_row_maxes) - row_sums.copy_(new_row_sums) - - lse = all_row_sums.log() + all_row_maxes - - ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size) - ctx.save_for_backward(q, k, v, o, lse) - - return o - - @staticmethod - @torch.no_grad() - def backward(ctx, do): - """Algorithm 4 in the paper""" - - causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args - q, k, v, o, lse = ctx.saved_tensors - - device = q.device - - max_neg_value = -torch.finfo(q.dtype).max - qk_len_diff = max(k.shape[-2] - q.shape[-2], 0) - - dq = torch.zeros_like(q) - dk = torch.zeros_like(k) - dv = torch.zeros_like(v) - - row_splits = zip( - q.split(q_bucket_size, dim=-2), - o.split(q_bucket_size, dim=-2), - do.split(q_bucket_size, dim=-2), - mask, - lse.split(q_bucket_size, dim=-2), - dq.split(q_bucket_size, dim=-2), - ) - - for ind, (qc, oc, doc, row_mask, lsec, dqc) in enumerate(row_splits): - q_start_index = ind * q_bucket_size - qk_len_diff - - col_splits = zip( - k.split(k_bucket_size, dim=-2), - v.split(k_bucket_size, dim=-2), - dk.split(k_bucket_size, dim=-2), - dv.split(k_bucket_size, dim=-2), - ) - - for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits): - k_start_index = k_ind * k_bucket_size - - attn_weights = einsum("... i d, ... j d -> ... i j", qc, kc) * scale - - if causal and q_start_index < (k_start_index + k_bucket_size - 1): - causal_mask = torch.ones( - (qc.shape[-2], kc.shape[-2]), dtype=torch.bool, device=device - ).triu(q_start_index - k_start_index + 1) - attn_weights.masked_fill_(causal_mask, max_neg_value) - - p = torch.exp(attn_weights - lsec) - - if exists(row_mask): - p.masked_fill_(~row_mask, 0.0) - - dv_chunk = einsum("... i j, ... i d -> ... j d", p, doc) - dp = einsum("... i d, ... j d -> ... i j", doc, vc) - - D = (doc * oc).sum(dim=-1, keepdims=True) - ds = p * scale * (dp - D) - - dq_chunk = einsum("... i j, ... j d -> ... i d", ds, kc) - dk_chunk = einsum("... i j, ... i d -> ... j d", ds, qc) - - dqc.add_(dq_chunk) - dkc.add_(dk_chunk) - dvc.add_(dv_chunk) - - return dq, dk, dv, None, None, None, None diff --git a/spaces/LinkSoul/Chinese-LLaVa/static/js/bulma-carousel.js b/spaces/LinkSoul/Chinese-LLaVa/static/js/bulma-carousel.js deleted file mode 100644 index 229edba242bb190698662cdce6bdacde9f0769fe..0000000000000000000000000000000000000000 --- a/spaces/LinkSoul/Chinese-LLaVa/static/js/bulma-carousel.js +++ /dev/null @@ -1,2371 +0,0 @@ -(function webpackUniversalModuleDefinition(root, factory) { - if(typeof exports === 'object' && typeof module === 'object') - module.exports = factory(); - else if(typeof define === 'function' && define.amd) - define([], factory); - else if(typeof exports === 'object') - exports["bulmaCarousel"] = factory(); - else - root["bulmaCarousel"] = factory(); -})(typeof self !== 'undefined' ? 
self : this, function() { -return /******/ (function(modules) { // webpackBootstrap -/******/ // The module cache -/******/ var installedModules = {}; -/******/ -/******/ // The require function -/******/ function __webpack_require__(moduleId) { -/******/ -/******/ // Check if module is in cache -/******/ if(installedModules[moduleId]) { -/******/ return installedModules[moduleId].exports; -/******/ } -/******/ // Create a new module (and put it into the cache) -/******/ var module = installedModules[moduleId] = { -/******/ i: moduleId, -/******/ l: false, -/******/ exports: {} -/******/ }; -/******/ -/******/ // Execute the module function -/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); -/******/ -/******/ // Flag the module as loaded -/******/ module.l = true; -/******/ -/******/ // Return the exports of the module -/******/ return module.exports; -/******/ } -/******/ -/******/ -/******/ // expose the modules object (__webpack_modules__) -/******/ __webpack_require__.m = modules; -/******/ -/******/ // expose the module cache -/******/ __webpack_require__.c = installedModules; -/******/ -/******/ // define getter function for harmony exports -/******/ __webpack_require__.d = function(exports, name, getter) { -/******/ if(!__webpack_require__.o(exports, name)) { -/******/ Object.defineProperty(exports, name, { -/******/ configurable: false, -/******/ enumerable: true, -/******/ get: getter -/******/ }); -/******/ } -/******/ }; -/******/ -/******/ // getDefaultExport function for compatibility with non-harmony modules -/******/ __webpack_require__.n = function(module) { -/******/ var getter = module && module.__esModule ? -/******/ function getDefault() { return module['default']; } : -/******/ function getModuleExports() { return module; }; -/******/ __webpack_require__.d(getter, 'a', getter); -/******/ return getter; -/******/ }; -/******/ -/******/ // Object.prototype.hasOwnProperty.call -/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; -/******/ -/******/ // __webpack_public_path__ -/******/ __webpack_require__.p = ""; -/******/ -/******/ // Load entry module and return exports -/******/ return __webpack_require__(__webpack_require__.s = 5); -/******/ }) -/************************************************************************/ -/******/ ([ -/* 0 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* unused harmony export addClasses */ -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return removeClasses; }); -/* unused harmony export show */ -/* unused harmony export hide */ -/* unused harmony export offset */ -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return width; }); -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return height; }); -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return outerHeight; }); -/* unused harmony export outerWidth */ -/* unused harmony export position */ -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return css; }); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__type__ = __webpack_require__(2); - - -var addClasses = function addClasses(element, classes) { - classes = Array.isArray(classes) ? 
classes : classes.split(' '); - classes.forEach(function (cls) { - element.classList.add(cls); - }); -}; - -var removeClasses = function removeClasses(element, classes) { - classes = Array.isArray(classes) ? classes : classes.split(' '); - classes.forEach(function (cls) { - element.classList.remove(cls); - }); -}; - -var show = function show(elements) { - elements = Array.isArray(elements) ? elements : [elements]; - elements.forEach(function (element) { - element.style.display = ''; - }); -}; - -var hide = function hide(elements) { - elements = Array.isArray(elements) ? elements : [elements]; - elements.forEach(function (element) { - element.style.display = 'none'; - }); -}; - -var offset = function offset(element) { - var rect = element.getBoundingClientRect(); - return { - top: rect.top + document.body.scrollTop, - left: rect.left + document.body.scrollLeft - }; -}; - -// returns an element's width -var width = function width(element) { - return element.getBoundingClientRect().width || element.offsetWidth; -}; -// returns an element's height -var height = function height(element) { - return element.getBoundingClientRect().height || element.offsetHeight; -}; - -var outerHeight = function outerHeight(element) { - var withMargin = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; - - var height = element.offsetHeight; - if (withMargin) { - var style = window.getComputedStyle(element); - height += parseInt(style.marginTop) + parseInt(style.marginBottom); - } - return height; -}; - -var outerWidth = function outerWidth(element) { - var withMargin = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; - - var width = element.offsetWidth; - if (withMargin) { - var style = window.getComputedStyle(element); - width += parseInt(style.marginLeft) + parseInt(style.marginRight); - } - return width; -}; - -var position = function position(element) { - return { - left: element.offsetLeft, - top: element.offsetTop - }; -}; - -var css = function css(element, obj) { - if (!obj) { - return window.getComputedStyle(element); - } - if (Object(__WEBPACK_IMPORTED_MODULE_0__type__["b" /* isObject */])(obj)) { - var style = ''; - Object.keys(obj).forEach(function (key) { - style += key + ': ' + obj[key] + ';'; - }); - - element.style.cssText += style; - } -}; - -/***/ }), -/* 1 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (immutable) */ __webpack_exports__["a"] = detectSupportsPassive; -function detectSupportsPassive() { - var supportsPassive = false; - - try { - var opts = Object.defineProperty({}, 'passive', { - get: function get() { - supportsPassive = true; - } - }); - - window.addEventListener('testPassive', null, opts); - window.removeEventListener('testPassive', null, opts); - } catch (e) {} - - return supportsPassive; -} - -/***/ }), -/* 2 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return isFunction; }); -/* unused harmony export isNumber */ -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return isString; }); -/* unused harmony export isDate */ -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return isObject; }); -/* unused harmony export isEmptyObject */ -/* unused harmony export isNode */ -/* unused harmony export isVideo */ -/* unused harmony export isHTML5 */ -/* unused 
harmony export isIFrame */ -/* unused harmony export isYoutube */ -/* unused harmony export isVimeo */ -var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; - -var isFunction = function isFunction(unknown) { - return typeof unknown === 'function'; -}; -var isNumber = function isNumber(unknown) { - return typeof unknown === "number"; -}; -var isString = function isString(unknown) { - return typeof unknown === 'string' || !!unknown && (typeof unknown === 'undefined' ? 'undefined' : _typeof(unknown)) === 'object' && Object.prototype.toString.call(unknown) === '[object String]'; -}; -var isDate = function isDate(unknown) { - return (Object.prototype.toString.call(unknown) === '[object Date]' || unknown instanceof Date) && !isNaN(unknown.valueOf()); -}; -var isObject = function isObject(unknown) { - return (typeof unknown === 'function' || (typeof unknown === 'undefined' ? 'undefined' : _typeof(unknown)) === 'object' && !!unknown) && !Array.isArray(unknown); -}; -var isEmptyObject = function isEmptyObject(unknown) { - for (var name in unknown) { - if (unknown.hasOwnProperty(name)) { - return false; - } - } - return true; -}; - -var isNode = function isNode(unknown) { - return !!(unknown && unknown.nodeType === HTMLElement | SVGElement); -}; -var isVideo = function isVideo(unknown) { - return isYoutube(unknown) || isVimeo(unknown) || isHTML5(unknown); -}; -var isHTML5 = function isHTML5(unknown) { - return isNode(unknown) && unknown.tagName === 'VIDEO'; -}; -var isIFrame = function isIFrame(unknown) { - return isNode(unknown) && unknown.tagName === 'IFRAME'; -}; -var isYoutube = function isYoutube(unknown) { - return isIFrame(unknown) && !!unknown.src.match(/\/\/.*?youtube(-nocookie)?\.[a-z]+\/(watch\?v=[^&\s]+|embed)|youtu\.be\/.*/); -}; -var isVimeo = function isVimeo(unknown) { - return isIFrame(unknown) && !!unknown.src.match(/vimeo\.com\/video\/.*/); -}; - -/***/ }), -/* 3 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } } - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -var EventEmitter = function () { - function EventEmitter() { - var events = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : []; - - _classCallCheck(this, EventEmitter); - - this.events = new Map(events); - } - - _createClass(EventEmitter, [{ - key: "on", - value: function on(name, cb) { - var _this = this; - - this.events.set(name, [].concat(_toConsumableArray(this.events.has(name) ? 
this.events.get(name) : []), [cb])); - - return function () { - return _this.events.set(name, _this.events.get(name).filter(function (fn) { - return fn !== cb; - })); - }; - } - }, { - key: "emit", - value: function emit(name) { - for (var _len = arguments.length, args = Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) { - args[_key - 1] = arguments[_key]; - } - - return this.events.has(name) && this.events.get(name).map(function (fn) { - return fn.apply(undefined, args); - }); - } - }]); - - return EventEmitter; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (EventEmitter); - -/***/ }), -/* 4 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -var Coordinate = function () { - function Coordinate() { - var x = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0; - var y = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 0; - - _classCallCheck(this, Coordinate); - - this._x = x; - this._y = y; - } - - _createClass(Coordinate, [{ - key: 'add', - value: function add(coord) { - return new Coordinate(this._x + coord._x, this._y + coord._y); - } - }, { - key: 'sub', - value: function sub(coord) { - return new Coordinate(this._x - coord._x, this._y - coord._y); - } - }, { - key: 'distance', - value: function distance(coord) { - var deltaX = this._x - coord._x; - var deltaY = this._y - coord._y; - - return Math.sqrt(Math.pow(deltaX, 2) + Math.pow(deltaY, 2)); - } - }, { - key: 'max', - value: function max(coord) { - var x = Math.max(this._x, coord._x); - var y = Math.max(this._y, coord._y); - - return new Coordinate(x, y); - } - }, { - key: 'equals', - value: function equals(coord) { - if (this == coord) { - return true; - } - if (!coord || coord == null) { - return false; - } - return this._x == coord._x && this._y == coord._y; - } - }, { - key: 'inside', - value: function inside(northwest, southeast) { - if (this._x >= northwest._x && this._x <= southeast._x && this._y >= northwest._y && this._y <= southeast._y) { - - return true; - } - return false; - } - }, { - key: 'constrain', - value: function constrain(min, max) { - if (min._x > max._x || min._y > max._y) { - return this; - } - - var x = this._x, - y = this._y; - - if (min._x !== null) { - x = Math.max(x, min._x); - } - if (max._x !== null) { - x = Math.min(x, max._x); - } - if (min._y !== null) { - y = Math.max(y, min._y); - } - if (max._y !== null) { - y = Math.min(y, max._y); - } - - return new Coordinate(x, y); - } - }, { - key: 'reposition', - value: function reposition(element) { - element.style['top'] = this._y + 'px'; - element.style['left'] = this._x + 'px'; - } - }, { - key: 'toString', - value: function toString() { - return '(' + this._x + ',' + this._y + ')'; - } - }, { - key: 'x', - get: function get() { - 
return this._x; - }, - set: function set() { - var value = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0; - - this._x = value; - return this; - } - }, { - key: 'y', - get: function get() { - return this._y; - }, - set: function set() { - var value = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0; - - this._y = value; - return this; - } - }]); - - return Coordinate; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Coordinate); - -/***/ }), -/* 5 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -Object.defineProperty(__webpack_exports__, "__esModule", { value: true }); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__utils_index__ = __webpack_require__(6); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__utils_css__ = __webpack_require__(0); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__utils_type__ = __webpack_require__(2); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__utils_eventEmitter__ = __webpack_require__(3); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__components_autoplay__ = __webpack_require__(7); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__components_breakpoint__ = __webpack_require__(9); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__components_infinite__ = __webpack_require__(10); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_7__components_loop__ = __webpack_require__(11); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_8__components_navigation__ = __webpack_require__(13); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_9__components_pagination__ = __webpack_require__(15); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_10__components_swipe__ = __webpack_require__(18); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_11__components_transitioner__ = __webpack_require__(19); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_12__defaultOptions__ = __webpack_require__(22); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_13__templates__ = __webpack_require__(23); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_14__templates_item__ = __webpack_require__(24); -var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; - -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return 
call && (typeof call === "object" || typeof call === "function") ? call : self; } - -function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } - - - - - - - - - - - - - - - - - - - -var bulmaCarousel = function (_EventEmitter) { - _inherits(bulmaCarousel, _EventEmitter); - - function bulmaCarousel(selector) { - var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; - - _classCallCheck(this, bulmaCarousel); - - var _this = _possibleConstructorReturn(this, (bulmaCarousel.__proto__ || Object.getPrototypeOf(bulmaCarousel)).call(this)); - - _this.element = Object(__WEBPACK_IMPORTED_MODULE_2__utils_type__["c" /* isString */])(selector) ? document.querySelector(selector) : selector; - // An invalid selector or non-DOM node has been provided. - if (!_this.element) { - throw new Error('An invalid selector or non-DOM node has been provided.'); - } - _this._clickEvents = ['click', 'touch']; - - // Use Element dataset values to override options - var elementConfig = _this.element.dataset ? Object.keys(_this.element.dataset).filter(function (key) { - return Object.keys(__WEBPACK_IMPORTED_MODULE_12__defaultOptions__["a" /* default */]).includes(key); - }).reduce(function (obj, key) { - return _extends({}, obj, _defineProperty({}, key, _this.element.dataset[key])); - }, {}) : {}; - // Set default options - dataset attributes are master - _this.options = _extends({}, __WEBPACK_IMPORTED_MODULE_12__defaultOptions__["a" /* default */], options, elementConfig); - - _this._id = Object(__WEBPACK_IMPORTED_MODULE_0__utils_index__["a" /* uuid */])('slider'); - - _this.onShow = _this.onShow.bind(_this); - - // Initiate plugin - _this._init(); - return _this; - } - - /** - * Initiate all DOM element containing datePicker class - * @method - * @return {Array} Array of all datePicker instances - */ - - - _createClass(bulmaCarousel, [{ - key: '_init', - - - /**************************************************** - * * - * PRIVATE FUNCTIONS * - * * - ****************************************************/ - /** - * Initiate plugin instance - * @method _init - * @return {Slider} Current plugin instance - */ - value: function _init() { - this._items = Array.from(this.element.children); - - // Load plugins - this._breakpoint = new __WEBPACK_IMPORTED_MODULE_5__components_breakpoint__["a" /* default */](this); - this._autoplay = new __WEBPACK_IMPORTED_MODULE_4__components_autoplay__["a" /* default */](this); - this._navigation = new __WEBPACK_IMPORTED_MODULE_8__components_navigation__["a" /* default */](this); - this._pagination = new __WEBPACK_IMPORTED_MODULE_9__components_pagination__["a" /* default */](this); - this._infinite = new __WEBPACK_IMPORTED_MODULE_6__components_infinite__["a" /* default */](this); - this._loop = new __WEBPACK_IMPORTED_MODULE_7__components_loop__["a" /* default */](this); - this._swipe = new __WEBPACK_IMPORTED_MODULE_10__components_swipe__["a" /* default */](this); - - this._build(); - - if (Object(__WEBPACK_IMPORTED_MODULE_2__utils_type__["a" /* isFunction */])(this.options.onReady)) { - this.options.onReady(this); - } - - return this; - } 
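- // Usage sketch (illustrative values only; assumes the UMD global `bulmaCarousel` exposed by the wrapper above and the static `attach` defined at the end of this module):
- // var sliders = bulmaCarousel.attach('.slider', { slidesToShow: 2, autoplay: true });
- // sliders.forEach(function (slider) {
- //   slider.on('show', function (s) { console.log('slide', s.state.index); });
- // });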
- - /** - * Build Slider HTML component and append it to the DOM - * @method _build - */ - - }, { - key: '_build', - value: function _build() { - var _this2 = this; - - // Generate HTML Fragment of template - this.node = document.createRange().createContextualFragment(Object(__WEBPACK_IMPORTED_MODULE_13__templates__["a" /* default */])(this.id)); - // Save pointers to template parts - this._ui = { - wrapper: this.node.firstChild, - container: this.node.querySelector('.slider-container') - - // Add slider to DOM - };this.element.appendChild(this.node); - this._ui.wrapper.classList.add('is-loading'); - this._ui.container.style.opacity = 0; - - this._transitioner = new __WEBPACK_IMPORTED_MODULE_11__components_transitioner__["a" /* default */](this); - - // Wrap all items by slide element - this._slides = this._items.map(function (item, index) { - return _this2._createSlide(item, index); - }); - - this.reset(); - - this._bindEvents(); - - this._ui.container.style.opacity = 1; - this._ui.wrapper.classList.remove('is-loading'); - } - - /** - * Bind all events - * @method _bindEvents - * @return {void} - */ - - }, { - key: '_bindEvents', - value: function _bindEvents() { - this.on('show', this.onShow); - } - }, { - key: '_unbindEvents', - value: function _unbindEvents() { - this.off('show', this.onShow); - } - }, { - key: '_createSlide', - value: function _createSlide(item, index) { - var slide = document.createRange().createContextualFragment(Object(__WEBPACK_IMPORTED_MODULE_14__templates_item__["a" /* default */])()).firstChild; - slide.dataset.sliderIndex = index; - slide.appendChild(item); - return slide; - } - - /** - * Calculate slider dimensions - */ - - }, { - key: '_setDimensions', - value: function _setDimensions() { - var _this3 = this; - - if (!this.options.vertical) { - if (this.options.centerMode) { - this._ui.wrapper.style.padding = '0px ' + this.options.centerPadding; - } - } else { - this._ui.wrapper.style.height = Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["c" /* outerHeight */])(this._slides[0]) * this.slidesToShow; - if (this.options.centerMode) { - this._ui.wrapper.style.padding = this.options.centerPadding + ' 0px'; - } - } - - this._wrapperWidth = Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["e" /* width */])(this._ui.wrapper); - this._wrapperHeight = Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["c" /* outerHeight */])(this._ui.wrapper); - - if (!this.options.vertical) { - this._slideWidth = Math.ceil(this._wrapperWidth / this.slidesToShow); - this._containerWidth = Math.ceil(this._slideWidth * this._slides.length); - this._ui.container.style.width = this._containerWidth + 'px'; - } else { - this._slideWidth = Math.ceil(this._wrapperWidth); - this._containerHeight = Math.ceil(Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["c" /* outerHeight */])(this._slides[0]) * this._slides.length); - this._ui.container.style.height = this._containerHeight + 'px'; - } - - this._slides.forEach(function (slide) { - slide.style.width = _this3._slideWidth + 'px'; - }); - } - }, { - key: '_setHeight', - value: function _setHeight() { - if (this.options.effect !== 'translate') { - this._ui.container.style.height = Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["c" /* outerHeight */])(this._slides[this.state.index]) + 'px'; - } - } - - // Update slides classes - - }, { - key: '_setClasses', - value: function _setClasses() { - var _this4 = this; - - this._slides.forEach(function (slide) { - Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["d" /* removeClasses */])(slide, 'is-active 
is-current is-slide-previous is-slide-next'); - if (Math.abs((_this4.state.index - 1) % _this4.state.length) === parseInt(slide.dataset.sliderIndex, 10)) { - slide.classList.add('is-slide-previous'); - } - if (Math.abs(_this4.state.index % _this4.state.length) === parseInt(slide.dataset.sliderIndex, 10)) { - slide.classList.add('is-current'); - } - if (Math.abs((_this4.state.index + 1) % _this4.state.length) === parseInt(slide.dataset.sliderIndex, 10)) { - slide.classList.add('is-slide-next'); - } - }); - } - - /**************************************************** - * * - * GETTERS and SETTERS * - * * - ****************************************************/ - - /** - * Get id of current datePicker - */ - - }, { - key: 'onShow', - - - /**************************************************** - * * - * EVENTS FUNCTIONS * - * * - ****************************************************/ - value: function onShow(e) { - this._navigation.refresh(); - this._pagination.refresh(); - this._setClasses(); - } - - /**************************************************** - * * - * PUBLIC FUNCTIONS * - * * - ****************************************************/ - - }, { - key: 'next', - value: function next() { - if (!this.options.loop && !this.options.infinite && this.state.index + this.slidesToScroll > this.state.length - this.slidesToShow && !this.options.centerMode) { - this.state.next = this.state.index; - } else { - this.state.next = this.state.index + this.slidesToScroll; - } - this.show(); - } - }, { - key: 'previous', - value: function previous() { - if (!this.options.loop && !this.options.infinite && this.state.index === 0) { - this.state.next = this.state.index; - } else { - this.state.next = this.state.index - this.slidesToScroll; - } - this.show(); - } - }, { - key: 'start', - value: function start() { - this._autoplay.start(); - } - }, { - key: 'pause', - value: function pause() { - this._autoplay.pause(); - } - }, { - key: 'stop', - value: function stop() { - this._autoplay.stop(); - } - }, { - key: 'show', - value: function show(index) { - var force = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : false; - - // If all slides are already visible then return - if (!this.state.length || this.state.length <= this.slidesToShow) { - return; - } - - // typeof yields lowercase type names; 'Number' would never match and show(index) ignored its argument - if (typeof index === 'number') { - this.state.next = index; - } - - if (this.options.loop) { - this._loop.apply(); - } - if (this.options.infinite) { - this._infinite.apply(); - } - - // If new slide is already the current one then return - if (this.state.index === this.state.next) { - return; - } - - this.emit('before:show', this.state); - this._transitioner.apply(force, this._setHeight.bind(this)); - this.emit('after:show', this.state); - - this.emit('show', this); - } - }, { - key: 'reset', - value: function reset() { - var _this5 = this; - - this.state = { - length: this._items.length, - index: Math.abs(this.options.initialSlide), - next: Math.abs(this.options.initialSlide), - prev: undefined - }; - - // Fix options - if (this.options.loop && this.options.infinite) { - this.options.loop = false; - } - if (this.options.slidesToScroll > this.options.slidesToShow) { - this.options.slidesToScroll = this.slidesToShow; - } - this._breakpoint.init(); - - if (this.state.index >= this.state.length && this.state.index !== 0) { - this.state.index = this.state.index - this.slidesToScroll; - } - if (this.state.length <= this.slidesToShow) { - this.state.index = 0; - } - - this._ui.wrapper.appendChild(this._navigation.init().render()); - this._ui.wrapper.appendChild(this._pagination.init().render()); - - if (this.options.navigationSwipe) { - this._swipe.bindEvents(); - } else { - this._swipe._unbindEvents(); - } - - this._breakpoint.apply(); - // Move all created slides into slider - this._slides.forEach(function (slide) { - return _this5._ui.container.appendChild(slide); - }); - this._transitioner.init().apply(true, this._setHeight.bind(this)); - - if (this.options.autoplay) { - this._autoplay.init().start(); - } - } - - /** - * Destroy Slider - * @method destroy - */ - - }, { - key: 'destroy', - value: function destroy() { - var _this6 = this; - - this._unbindEvents(); - this._items.forEach(function (item) { - _this6.element.appendChild(item); - }); - this.node.remove(); - } - }, { - key: 'id', - get: function get() { - return this._id; - } - }, { - key: 'index', - set: function set(index) { - this._index = index; - }, - get: function get() { - return this._index; - } - }, { - key: 'length', - set: function set(length) { - this._length = length; - }, - get: function get() { - return this._length; - } - }, { - key: 'slides', - get: function get() { - return this._slides; - }, - set: function set(slides) { - this._slides = slides; - } - }, { - key: 'slidesToScroll', - get: function get() { - return this.options.effect === 'translate' ? this._breakpoint.getSlidesToScroll() : 1; - } - }, { - key: 'slidesToShow', - get: function get() { - return this.options.effect === 'translate' ? this._breakpoint.getSlidesToShow() : 1; - } - }, { - key: 'direction', - get: function get() { - return this.element.dir.toLowerCase() === 'rtl' || this.element.style.direction === 'rtl' ?
'rtl' : 'ltr'; - } - }, { - key: 'wrapper', - get: function get() { - return this._ui.wrapper; - } - }, { - key: 'wrapperWidth', - get: function get() { - return this._wrapperWidth || 0; - } - }, { - key: 'container', - get: function get() { - return this._ui.container; - } - }, { - key: 'containerWidth', - get: function get() { - return this._containerWidth || 0; - } - }, { - key: 'slideWidth', - get: function get() { - return this._slideWidth || 0; - } - }, { - key: 'transitioner', - get: function get() { - return this._transitioner; - } - }], [{ - key: 'attach', - value: function attach() { - var _this7 = this; - - var selector = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : '.slider'; - var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; - - var instances = new Array(); - - var elements = Object(__WEBPACK_IMPORTED_MODULE_2__utils_type__["c" /* isString */])(selector) ? document.querySelectorAll(selector) : Array.isArray(selector) ? selector : [selector]; - [].forEach.call(elements, function (element) { - if (typeof element[_this7.constructor.name] === 'undefined') { - var instance = new bulmaCarousel(element, options); - element[_this7.constructor.name] = instance; - instances.push(instance); - } else { - instances.push(element[_this7.constructor.name]); - } - }); - - return instances; - } - }]); - - return bulmaCarousel; -}(__WEBPACK_IMPORTED_MODULE_3__utils_eventEmitter__["a" /* default */]); - -/* harmony default export */ __webpack_exports__["default"] = (bulmaCarousel); - -/***/ }), -/* 6 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return uuid; }); -/* unused harmony export isRtl */ -/* unused harmony export defer */ -/* unused harmony export getNodeIndex */ -/* unused harmony export camelize */ -function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } } - -var uuid = function uuid() { - var prefix = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : ''; - return prefix + ([1e7] + -1e3 + -4e3 + -8e3 + -1e11).replace(/[018]/g, function (c) { - return (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16); - }); -}; -var isRtl = function isRtl() { - return document.documentElement.getAttribute('dir') === 'rtl'; -}; - -var defer = function defer() { - this.promise = new Promise(function (resolve, reject) { - this.resolve = resolve; - this.reject = reject; - }.bind(this)); - - this.then = this.promise.then.bind(this.promise); - this.catch = this.promise.catch.bind(this.promise); -}; - -var getNodeIndex = function getNodeIndex(node) { - return [].concat(_toConsumableArray(node.parentNode.children)).indexOf(node); -}; -var camelize = function camelize(str) { - return str.replace(/-(\w)/g, toUpper); -}; - -/***/ }), -/* 7 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__utils_eventEmitter__ = __webpack_require__(3); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__utils_device__ = __webpack_require__(8); -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; } - -function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } - - - - -var onVisibilityChange = Symbol('onVisibilityChange'); -var onMouseEnter = Symbol('onMouseEnter'); -var onMouseLeave = Symbol('onMouseLeave'); - -var defaultOptions = { - autoplay: false, - autoplaySpeed: 3000 -}; - -var Autoplay = function (_EventEmitter) { - _inherits(Autoplay, _EventEmitter); - - function Autoplay(slider) { - _classCallCheck(this, Autoplay); - - var _this = _possibleConstructorReturn(this, (Autoplay.__proto__ || Object.getPrototypeOf(Autoplay)).call(this)); - - _this.slider = slider; - - _this.onVisibilityChange = _this.onVisibilityChange.bind(_this); - _this.onMouseEnter = _this.onMouseEnter.bind(_this); - _this.onMouseLeave = _this.onMouseLeave.bind(_this); - return _this; - } - - _createClass(Autoplay, [{ - key: 'init', - value: function init() { - this._bindEvents(); - return this; - } - }, { - key: '_bindEvents', - value: function _bindEvents() { - document.addEventListener('visibilitychange', this.onVisibilityChange); - if (this.slider.options.pauseOnHover) { - this.slider.container.addEventListener(__WEBPACK_IMPORTED_MODULE_1__utils_device__["a" /* pointerEnter */], this.onMouseEnter); - this.slider.container.addEventListener(__WEBPACK_IMPORTED_MODULE_1__utils_device__["b" /* pointerLeave */], this.onMouseLeave); - } - } - }, { - key: '_unbindEvents', - value: function _unbindEvents() { - document.removeEventListener('visibilitychange', this.onVisibilityChange); - this.slider.container.removeEventListener(__WEBPACK_IMPORTED_MODULE_1__utils_device__["a" /* pointerEnter */], this.onMouseEnter); - this.slider.container.removeEventListener(__WEBPACK_IMPORTED_MODULE_1__utils_device__["b" /* pointerLeave */], this.onMouseLeave); - } - }, { - key: 'start', - value: function start() { - var _this2 = this; - - this.stop(); - if (this.slider.options.autoplay) { - this.emit('start', this); - this._interval = setInterval(function () { - if (!(_this2._hovering && _this2.slider.options.pauseOnHover)) { - if (!_this2.slider.options.centerMode && _this2.slider.state.next >= _this2.slider.state.length - _this2.slider.slidesToShow && !_this2.slider.options.loop && !_this2.slider.options.infinite) { - _this2.stop(); - } else { - _this2.slider.next(); - } - } - }, this.slider.options.autoplaySpeed); - } - } - }, { - key: 'stop', - value: function stop() { - this._interval = clearInterval(this._interval); - this.emit('stop', this); - } - }, { - key: 'pause', - value: function pause() { - var _this3 = this; - - var speed = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : 0; - - if (this.paused) { - return; - } - if (this.timer) { - this.stop(); - } - this.paused = true; - if (speed === 0) { - this.paused = false; - this.start(); - } else { - this.slider.on('transition:end', function () { - if (!_this3) { - return; - } - _this3.paused = false; - if (!_this3.run) { - _this3.stop(); - } else { - _this3.start(); - } - }); - } - } - }, { - key: 'onVisibilityChange', - value: function onVisibilityChange(e) { - if (document.hidden) { - this.stop(); - } else { - this.start(); - } - } - }, { - key: 'onMouseEnter', - value: function onMouseEnter(e) { - this._hovering = true; - if (this.slider.options.pauseOnHover) { - this.pause(); - } - } - }, { - key: 'onMouseLeave', - value: function onMouseLeave(e) { - this._hovering = false; - if (this.slider.options.pauseOnHover) { - this.pause(); - } - } - }]); - - return Autoplay; -}(__WEBPACK_IMPORTED_MODULE_0__utils_eventEmitter__["a" /* default */]); - -/* harmony default export */ __webpack_exports__["a"] = (Autoplay); - -/***/ }), -/* 8 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* unused harmony export isIE */ -/* unused harmony export isIETouch */ -/* unused harmony export isAndroid */ -/* unused harmony export isiPad */ -/* unused harmony export isiPod */ -/* unused harmony export isiPhone */ -/* unused harmony export isSafari */ -/* unused harmony export isUiWebView */ -/* unused harmony export supportsTouchEvents */ -/* unused harmony export supportsPointerEvents */ -/* unused harmony export supportsTouch */ -/* unused harmony export pointerDown */ -/* unused harmony export pointerMove */ -/* unused harmony export pointerUp */ -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return pointerEnter; }); -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return pointerLeave; }); -var isIE = window.navigator.pointerEnabled || window.navigator.msPointerEnabled; -var isIETouch = window.navigator.msPointerEnabled && window.navigator.msMaxTouchPoints > 1 || window.navigator.pointerEnabled && window.navigator.maxTouchPoints > 1; -var isAndroid = navigator.userAgent.match(/(Android);?[\s\/]+([\d.]+)?/); -var isiPad = navigator.userAgent.match(/(iPad).*OS\s([\d_]+)/); -var isiPod = navigator.userAgent.match(/(iPod)(.*OS\s([\d_]+))?/); -var isiPhone = !navigator.userAgent.match(/(iPad).*OS\s([\d_]+)/) && navigator.userAgent.match(/(iPhone\sOS)\s([\d_]+)/); -var isSafari = navigator.userAgent.toLowerCase().indexOf('safari') >= 0 && navigator.userAgent.toLowerCase().indexOf('chrome') < 0 && navigator.userAgent.toLowerCase().indexOf('android') < 0; -var isUiWebView = /(iPhone|iPod|iPad).*AppleWebKit(?!.*Safari)/i.test(navigator.userAgent); - -var supportsTouchEvents = !!('ontouchstart' in window); -var supportsPointerEvents = !!('PointerEvent' in window); -var supportsTouch = supportsTouchEvents || window.DocumentTouch && document instanceof DocumentTouch || navigator.maxTouchPoints; // IE >=11 -var pointerDown = !supportsTouch ? 'mousedown' : 'mousedown ' + (supportsTouchEvents ? 'touchstart' : 'pointerdown'); -var pointerMove = !supportsTouch ? 'mousemove' : 'mousemove ' + (supportsTouchEvents ? 'touchmove' : 'pointermove'); -var pointerUp = !supportsTouch ? 'mouseup' : 'mouseup ' + (supportsTouchEvents ? 'touchend' : 'pointerup'); -var pointerEnter = supportsTouch && supportsPointerEvents ? 'pointerenter' : 'mouseenter'; -var pointerLeave = supportsTouch && supportsPointerEvents ? 
'pointerleave' : 'mouseleave'; - -/***/ }), -/* 9 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -var onResize = Symbol('onResize'); - -var Breakpoints = function () { - function Breakpoints(slider) { - _classCallCheck(this, Breakpoints); - - this.slider = slider; - this.options = slider.options; - - this[onResize] = this[onResize].bind(this); - - this._bindEvents(); - } - - _createClass(Breakpoints, [{ - key: 'init', - value: function init() { - this._defaultBreakpoint = { - slidesToShow: this.options.slidesToShow, - slidesToScroll: this.options.slidesToScroll - }; - // sort breakpoints ascending by changePoint (numeric comparator; a boolean comparator is not valid for Array.prototype.sort) - this.options.breakpoints.sort(function (a, b) { - return parseInt(a.changePoint, 10) - parseInt(b.changePoint, 10); - }); - this._currentBreakpoint = this._getActiveBreakpoint(); - - return this; - } - }, { - key: 'destroy', - value: function destroy() { - this._unbindEvents(); - } - }, { - key: '_bindEvents', - value: function _bindEvents() { - window.addEventListener('resize', this[onResize]); - window.addEventListener('orientationchange', this[onResize]); - } - }, { - key: '_unbindEvents', - value: function _unbindEvents() { - window.removeEventListener('resize', this[onResize]); - window.removeEventListener('orientationchange', this[onResize]); - } - }, { - key: '_getActiveBreakpoint', - value: function _getActiveBreakpoint() { - //Get breakpoint for window width - var _iteratorNormalCompletion = true; - var _didIteratorError = false; - var _iteratorError = undefined; - - try { - for (var _iterator = this.options.breakpoints[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) { - var point = _step.value; - - if (point.changePoint >= window.innerWidth) { - return point; - } - } - } catch (err) { - _didIteratorError = true; - _iteratorError = err; - } finally { - try { - if (!_iteratorNormalCompletion && _iterator.return) { - _iterator.return(); - } - } finally { - if (_didIteratorError) { - throw _iteratorError; - } - } - } - - return this._defaultBreakpoint; - } - }, { - key: 'getSlidesToShow', - value: function getSlidesToShow() { - return this._currentBreakpoint ? this._currentBreakpoint.slidesToShow : this._defaultBreakpoint.slidesToShow; - } - }, { - key: 'getSlidesToScroll', - value: function getSlidesToScroll() { - return this._currentBreakpoint ?
this._currentBreakpoint.slidesToScroll : this._defaultBreakpoint.slidesToScroll; - } - }, { - key: 'apply', - value: function apply() { - if (this.slider.state.index >= this.slider.state.length && this.slider.state.index !== 0) { - this.slider.state.index = this.slider.state.index - this._currentBreakpoint.slidesToScroll; - } - if (this.slider.state.length <= this._currentBreakpoint.slidesToShow) { - this.slider.state.index = 0; - } - - if (this.options.loop) { - this.slider._loop.init().apply(); - } - - if (this.options.infinite) { - this.slider._infinite.init().apply(); - } - - this.slider._setDimensions(); - this.slider._transitioner.init().apply(true, this.slider._setHeight.bind(this.slider)); - this.slider._setClasses(); - - this.slider._navigation.refresh(); - this.slider._pagination.refresh(); - } - }, { - key: onResize, - value: function value(e) { - var newBreakPoint = this._getActiveBreakpoint(); - if (newBreakPoint.slidesToShow !== this._currentBreakpoint.slidesToShow) { - this._currentBreakpoint = newBreakPoint; - this.apply(); - } - } - }]); - - return Breakpoints; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Breakpoints); - -/***/ }), -/* 10 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } } - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - -var Infinite = function () { - function Infinite(slider) { - _classCallCheck(this, Infinite); - - this.slider = slider; - } - - _createClass(Infinite, [{ - key: 'init', - value: function init() { - if (this.slider.options.infinite && this.slider.options.effect === 'translate') { - if (this.slider.options.centerMode) { - this._infiniteCount = Math.ceil(this.slider.slidesToShow + this.slider.slidesToShow / 2); - } else { - this._infiniteCount = this.slider.slidesToShow; - } - - var frontClones = []; - var slideIndex = 0; - for (var i = this.slider.state.length; i > this.slider.state.length - 1 - this._infiniteCount; i -= 1) { - slideIndex = i - 1; - frontClones.unshift(this._cloneSlide(this.slider.slides[slideIndex], slideIndex - this.slider.state.length)); - } - - var backClones = []; - for (var _i = 0; _i < this._infiniteCount + this.slider.state.length; _i += 1) { - backClones.push(this._cloneSlide(this.slider.slides[_i % this.slider.state.length], _i + this.slider.state.length)); - } - - this.slider.slides = [].concat(frontClones, _toConsumableArray(this.slider.slides), backClones); - } - return this; - } - }, { - key: 'apply', - value: function apply() {} - }, { - key: 'onTransitionEnd', - value: function onTransitionEnd(e) { - if (this.slider.options.infinite) { - if (this.slider.state.next >= this.slider.state.length) 
{ - this.slider.state.index = this.slider.state.next = this.slider.state.next - this.slider.state.length; - this.slider.transitioner.apply(true); - } else if (this.slider.state.next < 0) { - this.slider.state.index = this.slider.state.next = this.slider.state.length + this.slider.state.next; - this.slider.transitioner.apply(true); - } - } - } - }, { - key: '_cloneSlide', - value: function _cloneSlide(slide, index) { - var newSlide = slide.cloneNode(true); - newSlide.dataset.sliderIndex = index; - newSlide.dataset.cloned = true; - var ids = newSlide.querySelectorAll('[id]') || []; - ids.forEach(function (id) { - id.setAttribute('id', ''); - }); - return newSlide; - } - }]); - - return Infinite; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Infinite); - -/***/ }), -/* 11 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__utils_dom__ = __webpack_require__(12); -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - - - -var Loop = function () { - function Loop(slider) { - _classCallCheck(this, Loop); - - this.slider = slider; - } - - _createClass(Loop, [{ - key: "init", - value: function init() { - return this; - } - }, { - key: "apply", - value: function apply() { - if (this.slider.options.loop) { - if (this.slider.state.next > 0) { - if (this.slider.state.next < this.slider.state.length) { - if (this.slider.state.next > this.slider.state.length - this.slider.slidesToShow && Object(__WEBPACK_IMPORTED_MODULE_0__utils_dom__["a" /* isInViewport */])(this.slider._slides[this.slider.state.length - 1], this.slider.wrapper)) { - this.slider.state.next = 0; - } else { - this.slider.state.next = Math.min(Math.max(this.slider.state.next, 0), this.slider.state.length - this.slider.slidesToShow); - } - } else { - this.slider.state.next = 0; - } - } else { - if (this.slider.state.next <= 0 - this.slider.slidesToScroll) { - this.slider.state.next = this.slider.state.length - this.slider.slidesToShow; - } else { - this.slider.state.next = 0; - } - } - } - } - }]); - - return Loop; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Loop); - -/***/ }), -/* 12 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return isInViewport; }); -var isInViewport = function isInViewport(element, html) { - var rect = element.getBoundingClientRect(); - html = html || document.documentElement; - return rect.top >= 0 && rect.left >= 0 && rect.bottom <= (window.innerHeight || html.clientHeight) && rect.right <= (window.innerWidth || html.clientWidth); -}; - -/***/ }), -/* 13 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony import */ var 
__WEBPACK_IMPORTED_MODULE_0__templates_navigation__ = __webpack_require__(14); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__utils_detect_supportsPassive__ = __webpack_require__(1); -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - - - - -var Navigation = function () { - function Navigation(slider) { - _classCallCheck(this, Navigation); - - this.slider = slider; - - this._clickEvents = ['click', 'touch']; - this._supportsPassive = Object(__WEBPACK_IMPORTED_MODULE_1__utils_detect_supportsPassive__["a" /* default */])(); - - this.onPreviousClick = this.onPreviousClick.bind(this); - this.onNextClick = this.onNextClick.bind(this); - this.onKeyUp = this.onKeyUp.bind(this); - } - - _createClass(Navigation, [{ - key: 'init', - value: function init() { - this.node = document.createRange().createContextualFragment(Object(__WEBPACK_IMPORTED_MODULE_0__templates_navigation__["a" /* default */])(this.slider.options.icons)); - this._ui = { - previous: this.node.querySelector('.slider-navigation-previous'), - next: this.node.querySelector('.slider-navigation-next') - }; - - this._unbindEvents(); - this._bindEvents(); - - this.refresh(); - - return this; - } - }, { - key: 'destroy', - value: function destroy() { - this._unbindEvents(); - } - }, { - key: '_bindEvents', - value: function _bindEvents() { - var _this = this; - - this.slider.wrapper.addEventListener('keyup', this.onKeyUp); - this._clickEvents.forEach(function (clickEvent) { - _this._ui.previous.addEventListener(clickEvent, _this.onPreviousClick); - _this._ui.next.addEventListener(clickEvent, _this.onNextClick); - }); - } - }, { - key: '_unbindEvents', - value: function _unbindEvents() { - var _this2 = this; - - this.slider.wrapper.removeEventListener('keyup', this.onKeyUp); - this._clickEvents.forEach(function (clickEvent) { - _this2._ui.previous.removeEventListener(clickEvent, _this2.onPreviousClick); - _this2._ui.next.removeEventListener(clickEvent, _this2.onNextClick); - }); - } - }, { - key: 'onNextClick', - value: function onNextClick(e) { - if (!this._supportsPassive) { - e.preventDefault(); - } - - if (this.slider.options.navigation) { - this.slider.next(); - } - } - }, { - key: 'onPreviousClick', - value: function onPreviousClick(e) { - if (!this._supportsPassive) { - e.preventDefault(); - } - - if (this.slider.options.navigation) { - this.slider.previous(); - } - } - }, { - key: 'onKeyUp', - value: function onKeyUp(e) { - if (this.slider.options.keyNavigation) { - if (e.key === 'ArrowRight' || e.key === 'Right') { - this.slider.next(); - } else if (e.key === 'ArrowLeft' || e.key === 'Left') { - this.slider.previous(); - } - } - } - }, { - key: 'refresh', - value: function refresh() { - // let centerOffset = Math.floor(this.options.slidesToShow / 2); - if (!this.slider.options.loop && !this.slider.options.infinite) { - if (this.slider.options.navigation && 
this.slider.state.length > this.slider.slidesToShow) { - this._ui.previous.classList.remove('is-hidden'); - this._ui.next.classList.remove('is-hidden'); - if (this.slider.state.next === 0) { - this._ui.previous.classList.add('is-hidden'); - this._ui.next.classList.remove('is-hidden'); - } else if (this.slider.state.next >= this.slider.state.length - this.slider.slidesToShow && !this.slider.options.centerMode) { - this._ui.previous.classList.remove('is-hidden'); - this._ui.next.classList.add('is-hidden'); - } else if (this.slider.state.next >= this.slider.state.length - 1 && this.slider.options.centerMode) { - this._ui.previous.classList.remove('is-hidden'); - this._ui.next.classList.add('is-hidden'); - } - } else { - this._ui.previous.classList.add('is-hidden'); - this._ui.next.classList.add('is-hidden'); - } - } - } - }, { - key: 'render', - value: function render() { - return this.node; - } - }]); - - return Navigation; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Navigation); - -/***/ }), -/* 14 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony default export */ __webpack_exports__["a"] = (function (icons) { - return "
" + icons.previous + "
\n
" + icons.next + "
"; -}); - -/***/ }), -/* 15 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__templates_pagination__ = __webpack_require__(16); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__templates_pagination_page__ = __webpack_require__(17); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__utils_detect_supportsPassive__ = __webpack_require__(1); -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - - - - - -var Pagination = function () { - function Pagination(slider) { - _classCallCheck(this, Pagination); - - this.slider = slider; - - this._clickEvents = ['click', 'touch']; - this._supportsPassive = Object(__WEBPACK_IMPORTED_MODULE_2__utils_detect_supportsPassive__["a" /* default */])(); - - this.onPageClick = this.onPageClick.bind(this); - this.onResize = this.onResize.bind(this); - } - - _createClass(Pagination, [{ - key: 'init', - value: function init() { - this._pages = []; - this.node = document.createRange().createContextualFragment(Object(__WEBPACK_IMPORTED_MODULE_0__templates_pagination__["a" /* default */])()); - this._ui = { - container: this.node.firstChild - }; - - this._count = Math.ceil((this.slider.state.length - this.slider.slidesToShow) / this.slider.slidesToScroll); - - this._draw(); - this.refresh(); - - return this; - } - }, { - key: 'destroy', - value: function destroy() { - this._unbindEvents(); - } - }, { - key: '_bindEvents', - value: function _bindEvents() { - var _this = this; - - window.addEventListener('resize', this.onResize); - window.addEventListener('orientationchange', this.onResize); - - this._clickEvents.forEach(function (clickEvent) { - _this._pages.forEach(function (page) { - return page.addEventListener(clickEvent, _this.onPageClick); - }); - }); - } - }, { - key: '_unbindEvents', - value: function _unbindEvents() { - var _this2 = this; - - window.removeEventListener('resize', this.onResize); - window.removeEventListener('orientationchange', this.onResize); - - this._clickEvents.forEach(function (clickEvent) { - _this2._pages.forEach(function (page) { - return page.removeEventListener(clickEvent, _this2.onPageClick); - }); - }); - } - }, { - key: '_draw', - value: function _draw() { - this._ui.container.innerHTML = ''; - if (this.slider.options.pagination && this.slider.state.length > this.slider.slidesToShow) { - for (var i = 0; i <= this._count; i++) { - var newPageNode = document.createRange().createContextualFragment(Object(__WEBPACK_IMPORTED_MODULE_1__templates_pagination_page__["a" /* default */])()).firstChild; - newPageNode.dataset.index = i * this.slider.slidesToScroll; - this._pages.push(newPageNode); - this._ui.container.appendChild(newPageNode); - } - this._bindEvents(); - } - } - }, { - key: 'onPageClick', - value: function onPageClick(e) { - if (!this._supportsPassive) { - 
e.preventDefault(); - } - - this.slider.state.next = e.currentTarget.dataset.index; - this.slider.show(); - } - }, { - key: 'onResize', - value: function onResize() { - this._draw(); - } - }, { - key: 'refresh', - value: function refresh() { - var _this3 = this; - - var newCount = void 0; - - if (this.slider.options.infinite) { - newCount = Math.ceil((this.slider.state.length - 1) / this.slider.slidesToScroll); - } else { - newCount = Math.ceil((this.slider.state.length - this.slider.slidesToShow) / this.slider.slidesToScroll); - } - if (newCount !== this._count) { - this._count = newCount; - this._draw(); - } - - this._pages.forEach(function (page) { - page.classList.remove('is-active'); - if (parseInt(page.dataset.index, 10) === _this3.slider.state.next % _this3.slider.state.length) { - page.classList.add('is-active'); - } - }); - } - }, { - key: 'render', - value: function render() { - return this.node; - } - }]); - - return Pagination; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Pagination); - -/***/ }), -/* 16 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony default export */ __webpack_exports__["a"] = (function () { - return "<div class=\"slider-pagination\"></div>"; -}); - -/***/ }), -/* 17 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony default export */ __webpack_exports__["a"] = (function () { - return "<div class=\"slider-page\"></div>
"; -}); - -/***/ }), -/* 18 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__utils_coordinate__ = __webpack_require__(4); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__utils_detect_supportsPassive__ = __webpack_require__(1); -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - - - - -var Swipe = function () { - function Swipe(slider) { - _classCallCheck(this, Swipe); - - this.slider = slider; - - this._supportsPassive = Object(__WEBPACK_IMPORTED_MODULE_1__utils_detect_supportsPassive__["a" /* default */])(); - - this.onStartDrag = this.onStartDrag.bind(this); - this.onMoveDrag = this.onMoveDrag.bind(this); - this.onStopDrag = this.onStopDrag.bind(this); - - this._init(); - } - - _createClass(Swipe, [{ - key: '_init', - value: function _init() {} - }, { - key: 'bindEvents', - value: function bindEvents() { - var _this = this; - - this.slider.container.addEventListener('dragstart', function (e) { - if (!_this._supportsPassive) { - e.preventDefault(); - } - }); - this.slider.container.addEventListener('mousedown', this.onStartDrag); - this.slider.container.addEventListener('touchstart', this.onStartDrag); - - window.addEventListener('mousemove', this.onMoveDrag); - window.addEventListener('touchmove', this.onMoveDrag); - - window.addEventListener('mouseup', this.onStopDrag); - window.addEventListener('touchend', this.onStopDrag); - window.addEventListener('touchcancel', this.onStopDrag); - } - }, { - key: 'unbindEvents', - value: function unbindEvents() { - var _this2 = this; - - this.slider.container.removeEventListener('dragstart', function (e) { - if (!_this2._supportsPassive) { - e.preventDefault(); - } - }); - this.slider.container.removeEventListener('mousedown', this.onStartDrag); - this.slider.container.removeEventListener('touchstart', this.onStartDrag); - - window.removeEventListener('mousemove', this.onMoveDrag); - window.removeEventListener('touchmove', this.onMoveDrag); - - window.removeEventListener('mouseup', this.onStopDrag); - window.removeEventListener('mouseup', this.onStopDrag); - window.removeEventListener('touchcancel', this.onStopDrag); - } - - /** - * @param {MouseEvent|TouchEvent} - */ - - }, { - key: 'onStartDrag', - value: function onStartDrag(e) { - if (e.touches) { - if (e.touches.length > 1) { - return; - } else { - e = e.touches[0]; - } - } - - this._origin = new __WEBPACK_IMPORTED_MODULE_0__utils_coordinate__["a" /* default */](e.screenX, e.screenY); - this.width = this.slider.wrapperWidth; - this.slider.transitioner.disable(); - } - - /** - * @param {MouseEvent|TouchEvent} - */ - - }, { - key: 'onMoveDrag', - value: function onMoveDrag(e) { - if (this._origin) { - var point = e.touches ? 
e.touches[0] : e; - this._lastTranslate = new __WEBPACK_IMPORTED_MODULE_0__utils_coordinate__["a" /* default */](point.screenX - this._origin.x, point.screenY - this._origin.y); - if (e.touches) { - if (Math.abs(this._lastTranslate.x) > Math.abs(this._lastTranslate.y)) { - if (!this._supportsPassive) { - e.preventDefault(); - } - e.stopPropagation(); - } - } - } - } - - /** - * @param {MouseEvent|TouchEvent} - */ - - }, { - key: 'onStopDrag', - value: function onStopDrag(e) { - if (this._origin && this._lastTranslate) { - if (Math.abs(this._lastTranslate.x) > 0.2 * this.width) { - if (this._lastTranslate.x < 0) { - this.slider.next(); - } else { - this.slider.previous(); - } - } else { - this.slider.show(true); - } - } - this._origin = null; - this._lastTranslate = null; - } - }]); - - return Swipe; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Swipe); - -/***/ }), -/* 19 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__transitions_fade__ = __webpack_require__(20); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__transitions_translate__ = __webpack_require__(21); -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - - - - -var Transitioner = function () { - function Transitioner(slider) { - _classCallCheck(this, Transitioner); - - this.slider = slider; - this.options = slider.options; - - this._animating = false; - this._animation = undefined; - - this._translate = new __WEBPACK_IMPORTED_MODULE_1__transitions_translate__["a" /* default */](this, slider, slider.options); - this._fade = new __WEBPACK_IMPORTED_MODULE_0__transitions_fade__["a" /* default */](this, slider, slider.options); - } - - _createClass(Transitioner, [{ - key: 'init', - value: function init() { - this._fade.init(); - this._translate.init(); - return this; - } - }, { - key: 'isAnimating', - value: function isAnimating() { - return this._animating; - } - }, { - key: 'enable', - value: function enable() { - this._animation && this._animation.enable(); - } - }, { - key: 'disable', - value: function disable() { - this._animation && this._animation.disable(); - } - }, { - key: 'apply', - value: function apply(force, callback) { - // If we don't force refresh and animation in progress then return - if (this._animating && !force) { - return; - } - - switch (this.options.effect) { - case 'fade': - this._animation = this._fade; - break; - case 'translate': - default: - this._animation = this._translate; - break; - } - - this._animationCallback = callback; - - if (force) { - this._animation && this._animation.disable(); - } else { - this._animation && this._animation.enable(); - this._animating = true; - } - - this._animation && this._animation.apply(); - - if (force) { - this.end(); - } - } - }, { - key: 'end', - value: function end() { - this._animating 
= false; - this._animation = undefined; - this.slider.state.index = this.slider.state.next; - if (this._animationCallback) { - this._animationCallback(); - } - } - }]); - - return Transitioner; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Transitioner); - -/***/ }), -/* 20 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__utils_css__ = __webpack_require__(0); -var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; - -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - - - -var Fade = function () { - function Fade(transitioner, slider) { - var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {}; - - _classCallCheck(this, Fade); - - this.transitioner = transitioner; - this.slider = slider; - this.options = _extends({}, options); - } - - _createClass(Fade, [{ - key: 'init', - value: function init() { - var _this = this; - - if (this.options.effect === 'fade') { - this.slider.slides.forEach(function (slide, index) { - Object(__WEBPACK_IMPORTED_MODULE_0__utils_css__["a" /* css */])(slide, { - position: 'absolute', - left: 0, - top: 0, - bottom: 0, - 'z-index': slide.dataset.sliderIndex == _this.slider.state.index ? 0 : -2, - opacity: slide.dataset.sliderIndex == _this.slider.state.index ? 
1 : 0 - }); - }); - } - return this; - } - }, { - key: 'enable', - value: function enable() { - var _this2 = this; - - this._oldSlide = this.slider.slides.filter(function (slide) { - return slide.dataset.sliderIndex == _this2.slider.state.index; - })[0]; - this._newSlide = this.slider.slides.filter(function (slide) { - return slide.dataset.sliderIndex == _this2.slider.state.next; - })[0]; - if (this._newSlide) { - this._newSlide.addEventListener('transitionend', this.onTransitionEnd.bind(this)); - this._newSlide.style.transition = this.options.duration + 'ms ' + this.options.timing; - if (this._oldSlide) { - this._oldSlide.addEventListener('transitionend', this.onTransitionEnd.bind(this)); - this._oldSlide.style.transition = this.options.duration + 'ms ' + this.options.timing; - } - } - } - }, { - key: 'disable', - value: function disable() { - var _this3 = this; - - this._oldSlide = this.slider.slides.filter(function (slide) { - return slide.dataset.sliderIndex == _this3.slider.state.index; - })[0]; - this._newSlide = this.slider.slides.filter(function (slide) { - return slide.dataset.sliderIndex == _this3.slider.state.next; - })[0]; - if (this._newSlide) { - this._newSlide.removeEventListener('transitionend', this.onTransitionEnd.bind(this)); - this._newSlide.style.transition = 'none'; - if (this._oldSlide) { - this._oldSlide.removeEventListener('transitionend', this.onTransitionEnd.bind(this)); - this._oldSlide.style.transition = 'none'; - } - } - } - }, { - key: 'apply', - value: function apply(force) { - var _this4 = this; - - this._oldSlide = this.slider.slides.filter(function (slide) { - return slide.dataset.sliderIndex == _this4.slider.state.index; - })[0]; - this._newSlide = this.slider.slides.filter(function (slide) { - return slide.dataset.sliderIndex == _this4.slider.state.next; - })[0]; - - if (this._oldSlide && this._newSlide) { - Object(__WEBPACK_IMPORTED_MODULE_0__utils_css__["a" /* css */])(this._oldSlide, { - opacity: 0 - }); - Object(__WEBPACK_IMPORTED_MODULE_0__utils_css__["a" /* css */])(this._newSlide, { - opacity: 1, - 'z-index': force ? 
0 : -1 - }); - } - } - }, { - key: 'onTransitionEnd', - value: function onTransitionEnd(e) { - if (this.options.effect === 'fade') { - if (this.transitioner.isAnimating() && e.target == this._newSlide) { - if (this._newSlide) { - Object(__WEBPACK_IMPORTED_MODULE_0__utils_css__["a" /* css */])(this._newSlide, { - 'z-index': 0 - }); - this._newSlide.removeEventListener('transitionend', this.onTransitionEnd.bind(this)); - } - if (this._oldSlide) { - Object(__WEBPACK_IMPORTED_MODULE_0__utils_css__["a" /* css */])(this._oldSlide, { - 'z-index': -2 - }); - this._oldSlide.removeEventListener('transitionend', this.onTransitionEnd.bind(this)); - } - } - this.transitioner.end(); - } - } - }]); - - return Fade; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Fade); - -/***/ }), -/* 21 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__utils_coordinate__ = __webpack_require__(4); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__utils_css__ = __webpack_require__(0); -var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; - -var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); - -function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } - - - - -var Translate = function () { - function Translate(transitioner, slider) { - var options = arguments.length > 2 && arguments[2] !== undefined ? 
arguments[2] : {}; - - _classCallCheck(this, Translate); - - this.transitioner = transitioner; - this.slider = slider; - this.options = _extends({}, options); - - this.onTransitionEnd = this.onTransitionEnd.bind(this); - } - - _createClass(Translate, [{ - key: 'init', - value: function init() { - this._position = new __WEBPACK_IMPORTED_MODULE_0__utils_coordinate__["a" /* default */](this.slider.container.offsetLeft, this.slider.container.offsetTop); - this._bindEvents(); - return this; - } - }, { - key: 'destroy', - value: function destroy() { - this._unbindEvents(); - } - }, { - key: '_bindEvents', - value: function _bindEvents() { - this.slider.container.addEventListener('transitionend', this.onTransitionEnd); - } - }, { - key: '_unbindEvents', - value: function _unbindEvents() { - this.slider.container.removeEventListener('transitionend', this.onTransitionEnd); - } - }, { - key: 'enable', - value: function enable() { - this.slider.container.style.transition = this.options.duration + 'ms ' + this.options.timing; - } - }, { - key: 'disable', - value: function disable() { - this.slider.container.style.transition = 'none'; - } - }, { - key: 'apply', - value: function apply() { - var _this = this; - - var maxOffset = void 0; - if (this.options.effect === 'translate') { - var slide = this.slider.slides.filter(function (slide) { - return slide.dataset.sliderIndex == _this.slider.state.next; - })[0]; - var slideOffset = new __WEBPACK_IMPORTED_MODULE_0__utils_coordinate__["a" /* default */](slide.offsetLeft, slide.offsetTop); - if (this.options.centerMode) { - maxOffset = new __WEBPACK_IMPORTED_MODULE_0__utils_coordinate__["a" /* default */](Math.round(Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["e" /* width */])(this.slider.container)), Math.round(Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["b" /* height */])(this.slider.container))); - } else { - maxOffset = new __WEBPACK_IMPORTED_MODULE_0__utils_coordinate__["a" /* default */](Math.round(Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["e" /* width */])(this.slider.container) - Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["e" /* width */])(this.slider.wrapper)), Math.round(Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["b" /* height */])(this.slider.container) - Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["b" /* height */])(this.slider.wrapper))); - } - var nextOffset = new __WEBPACK_IMPORTED_MODULE_0__utils_coordinate__["a" /* default */](Math.min(Math.max(slideOffset.x * -1, maxOffset.x * -1), 0), Math.min(Math.max(slideOffset.y * -1, maxOffset.y * -1), 0)); - if (this.options.loop) { - if (!this.options.vertical && Math.abs(this._position.x) > maxOffset.x) { - nextOffset.x = 0; - this.slider.state.next = 0; - } else if (this.options.vertical && Math.abs(this._position.y) > maxOffset.y) { - nextOffset.y = 0; - this.slider.state.next = 0; - } - } - - this._position.x = nextOffset.x; - this._position.y = nextOffset.y; - if (this.options.centerMode) { - this._position.x = this._position.x + this.slider.wrapperWidth / 2 - Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["e" /* width */])(slide) / 2; - } - - if (this.slider.direction === 'rtl') { - this._position.x = -this._position.x; - this._position.y = -this._position.y; - } - this.slider.container.style.transform = 'translate3d(' + this._position.x + 'px, ' + this._position.y + 'px, 0)'; - - /** - * update the index with the nextIndex only if - * the offset of the nextIndex is in the range of the maxOffset - */ - if (slideOffset.x > maxOffset.x) { - 
this.slider.transitioner.end(); - } - } - } - }, { - key: 'onTransitionEnd', - value: function onTransitionEnd(e) { - if (this.options.effect === 'translate') { - - if (this.transitioner.isAnimating() && e.target == this.slider.container) { - if (this.options.infinite) { - this.slider._infinite.onTransitionEnd(e); - } - } - this.transitioner.end(); - } - } - }]); - - return Translate; -}(); - -/* harmony default export */ __webpack_exports__["a"] = (Translate); - -/***/ }), -/* 22 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -var defaultOptions = { - initialSlide: 0, - slidesToScroll: 1, - slidesToShow: 1, - - navigation: true, - navigationKeys: true, - navigationSwipe: true, - - pagination: true, - - loop: false, - infinite: false, - - effect: 'translate', - duration: 300, - timing: 'ease', - - autoplay: false, - autoplaySpeed: 3000, - pauseOnHover: true, - breakpoints: [{ - changePoint: 480, - slidesToShow: 1, - slidesToScroll: 1 - }, { - changePoint: 640, - slidesToShow: 2, - slidesToScroll: 2 - }, { - changePoint: 768, - slidesToShow: 3, - slidesToScroll: 3 - }], - - onReady: null, - icons: { - 'previous': '\n \n ', - 'next': '\n \n ' - } -}; - -/* harmony default export */ __webpack_exports__["a"] = (defaultOptions); - -/***/ }), -/* 23 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony default export */ __webpack_exports__["a"] = (function (id) { - return "
<div id=\"" + id + "\" class=\"slider\">\n<div class=\"slider-container\"></div>\n</div>"; -}); - -/***/ }), -/* 24 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony default export */ __webpack_exports__["a"] = (function () { - return "<div class=\"slider-item\"></div>
"; -}); - -/***/ }) -/******/ ])["default"]; -}); \ No newline at end of file diff --git a/spaces/LinkSoul/Chinese-LLaVa/static/js/bulma-carousel.min.js b/spaces/LinkSoul/Chinese-LLaVa/static/js/bulma-carousel.min.js deleted file mode 100644 index 5fff0695f00cf9da60dd87aa72c51367b00e92ff..0000000000000000000000000000000000000000 --- a/spaces/LinkSoul/Chinese-LLaVa/static/js/bulma-carousel.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaCarousel=e():t.bulmaCarousel=e()}("undefined"!=typeof self?self:this,function(){return function(i){var n={};function s(t){if(n[t])return n[t].exports;var e=n[t]={i:t,l:!1,exports:{}};return i[t].call(e.exports,e,e.exports,s),e.l=!0,e.exports}return s.m=i,s.c=n,s.d=function(t,e,i){s.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:i})},s.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return s.d(e,"a",e),e},s.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},s.p="",s(s.s=5)}([function(t,e,i){"use strict";i.d(e,"d",function(){return s}),i.d(e,"e",function(){return r}),i.d(e,"b",function(){return o}),i.d(e,"c",function(){return a}),i.d(e,"a",function(){return l});var n=i(2),s=function(e,t){(t=Array.isArray(t)?t:t.split(" ")).forEach(function(t){e.classList.remove(t)})},r=function(t){return t.getBoundingClientRect().width||t.offsetWidth},o=function(t){return t.getBoundingClientRect().height||t.offsetHeight},a=function(t){var e=1=t._x&&this._x<=e._x&&this._y>=t._y&&this._y<=e._y}},{key:"constrain",value:function(t,e){if(t._x>e._x||t._y>e._y)return this;var i=this._x,n=this._y;return null!==t._x&&(i=Math.max(i,t._x)),null!==e._x&&(i=Math.min(i,e._x)),null!==t._y&&(n=Math.max(n,t._y)),null!==e._y&&(n=Math.min(n,e._y)),new s(i,n)}},{key:"reposition",value:function(t){t.style.top=this._y+"px",t.style.left=this._x+"px"}},{key:"toString",value:function(){return"("+this._x+","+this._y+")"}},{key:"x",get:function(){return this._x},set:function(){var t=0this.state.length-this.slidesToShow&&!this.options.centerMode?this.state.next=this.state.index:this.state.next=this.state.index+this.slidesToScroll,this.show()}},{key:"previous",value:function(){this.options.loop||this.options.infinite||0!==this.state.index?this.state.next=this.state.index-this.slidesToScroll:this.state.next=this.state.index,this.show()}},{key:"start",value:function(){this._autoplay.start()}},{key:"pause",value:function(){this._autoplay.pause()}},{key:"stop",value:function(){this._autoplay.stop()}},{key:"show",value:function(t){var e=1this.options.slidesToShow&&(this.options.slidesToScroll=this.slidesToShow),this._breakpoint.init(),this.state.index>=this.state.length&&0!==this.state.index&&(this.state.index=this.state.index-this.slidesToScroll),this.state.length<=this.slidesToShow&&(this.state.index=0),this._ui.wrapper.appendChild(this._navigation.init().render()),this._ui.wrapper.appendChild(this._pagination.init().render()),this.options.navigationSwipe?this._swipe.bindEvents():this._swipe._bindEvents(),this._breakpoint.apply(),this._slides.forEach(function(t){return e._ui.container.appendChild(t)}),this._transitioner.init().apply(!0,this._setHeight.bind(this)),this.options.autoplay&&this._autoplay.init().start()}},{key:"destroy",value:function(){var 
e=this;this._unbindEvents(),this._items.forEach(function(t){e.element.appendChild(t)}),this.node.remove()}},{key:"id",get:function(){return this._id}},{key:"index",set:function(t){this._index=t},get:function(){return this._index}},{key:"length",set:function(t){this._length=t},get:function(){return this._length}},{key:"slides",get:function(){return this._slides},set:function(t){this._slides=t}},{key:"slidesToScroll",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToScroll():1}},{key:"slidesToShow",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToShow():1}},{key:"direction",get:function(){return"rtl"===this.element.dir.toLowerCase()||"rtl"===this.element.style.direction?"rtl":"ltr"}},{key:"wrapper",get:function(){return this._ui.wrapper}},{key:"wrapperWidth",get:function(){return this._wrapperWidth||0}},{key:"container",get:function(){return this._ui.container}},{key:"containerWidth",get:function(){return this._containerWidth||0}},{key:"slideWidth",get:function(){return this._slideWidth||0}},{key:"transitioner",get:function(){return this._transitioner}}],[{key:"attach",value:function(){var i=this,t=0>t/4).toString(16)})}},function(t,e,i){"use strict";var n=i(3),s=i(8),r=function(){function n(t,e){for(var i=0;i=t.slider.state.length-t.slider.slidesToShow&&!t.slider.options.loop&&!t.slider.options.infinite?t.stop():t.slider.next())},this.slider.options.autoplaySpeed))}},{key:"stop",value:function(){this._interval=clearInterval(this._interval),this.emit("stop",this)}},{key:"pause",value:function(){var t=this,e=0parseInt(e.changePoint,10)}),this._currentBreakpoint=this._getActiveBreakpoint(),this}},{key:"destroy",value:function(){this._unbindEvents()}},{key:"_bindEvents",value:function(){window.addEventListener("resize",this[s]),window.addEventListener("orientationchange",this[s])}},{key:"_unbindEvents",value:function(){window.removeEventListener("resize",this[s]),window.removeEventListener("orientationchange",this[s])}},{key:"_getActiveBreakpoint",value:function(){var t=!0,e=!1,i=void 0;try{for(var n,s=this.options.breakpoints[Symbol.iterator]();!(t=(n=s.next()).done);t=!0){var r=n.value;if(r.changePoint>=window.innerWidth)return r}}catch(t){e=!0,i=t}finally{try{!t&&s.return&&s.return()}finally{if(e)throw i}}return this._defaultBreakpoint}},{key:"getSlidesToShow",value:function(){return this._currentBreakpoint?this._currentBreakpoint.slidesToShow:this._defaultBreakpoint.slidesToShow}},{key:"getSlidesToScroll",value:function(){return this._currentBreakpoint?this._currentBreakpoint.slidesToScroll:this._defaultBreakpoint.slidesToScroll}},{key:"apply",value:function(){this.slider.state.index>=this.slider.state.length&&0!==this.slider.state.index&&(this.slider.state.index=this.slider.state.index-this._currentBreakpoint.slidesToScroll),this.slider.state.length<=this._currentBreakpoint.slidesToShow&&(this.slider.state.index=0),this.options.loop&&this.slider._loop.init().apply(),this.options.infinite&&this.slider._infinite.init().apply(),this.slider._setDimensions(),this.slider._transitioner.init().apply(!0,this.slider._setHeight.bind(this.slider)),this.slider._setClasses(),this.slider._navigation.refresh(),this.slider._pagination.refresh()}},{key:s,value:function(t){var e=this._getActiveBreakpoint();e.slidesToShow!==this._currentBreakpoint.slidesToShow&&(this._currentBreakpoint=e,this.apply())}}]),e}();e.a=r},function(t,e,i){"use strict";var n=function(){function n(t,e){for(var 
i=0;ithis.slider.state.length-1-this._infiniteCount;i-=1)e=i-1,t.unshift(this._cloneSlide(this.slider.slides[e],e-this.slider.state.length));for(var n=[],s=0;s=this.slider.state.length?(this.slider.state.index=this.slider.state.next=this.slider.state.next-this.slider.state.length,this.slider.transitioner.apply(!0)):this.slider.state.next<0&&(this.slider.state.index=this.slider.state.next=this.slider.state.length+this.slider.state.next,this.slider.transitioner.apply(!0)))}},{key:"_cloneSlide",value:function(t,e){var i=t.cloneNode(!0);return i.dataset.sliderIndex=e,i.dataset.cloned=!0,(i.querySelectorAll("[id]")||[]).forEach(function(t){t.setAttribute("id","")}),i}}]),e}();e.a=s},function(t,e,i){"use strict";var n=i(12),s=function(){function n(t,e){for(var i=0;ithis.slider.state.length-this.slider.slidesToShow&&Object(n.a)(this.slider._slides[this.slider.state.length-1],this.slider.wrapper)?this.slider.state.next=0:this.slider.state.next=Math.min(Math.max(this.slider.state.next,0),this.slider.state.length-this.slider.slidesToShow):this.slider.state.next=0:this.slider.state.next<=0-this.slider.slidesToScroll?this.slider.state.next=this.slider.state.length-this.slider.slidesToShow:this.slider.state.next=0)}}]),e}();e.a=r},function(t,e,i){"use strict";i.d(e,"a",function(){return n});var n=function(t,e){var i=t.getBoundingClientRect();return e=e||document.documentElement,0<=i.top&&0<=i.left&&i.bottom<=(window.innerHeight||e.clientHeight)&&i.right<=(window.innerWidth||e.clientWidth)}},function(t,e,i){"use strict";var n=i(14),s=i(1),r=function(){function n(t,e){for(var i=0;ithis.slider.slidesToShow?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.remove("is-hidden"),0===this.slider.state.next?(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.remove("is-hidden")):this.slider.state.next>=this.slider.state.length-this.slider.slidesToShow&&!this.slider.options.centerMode?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden")):this.slider.state.next>=this.slider.state.length-1&&this.slider.options.centerMode&&(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden"))):(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.add("is-hidden")))}},{key:"render",value:function(){return this.node}}]),e}();e.a=o},function(t,e,i){"use strict";e.a=function(t){return'
<div class="slider-navigation-previous">'+t.previous+'</div>\n<div class="slider-navigation-next">'+t.next+"</div>
"}},function(t,e,i){"use strict";var n=i(16),s=i(17),r=i(1),o=function(){function n(t,e){for(var i=0;ithis.slider.slidesToShow){for(var t=0;t<=this._count;t++){var e=document.createRange().createContextualFragment(Object(s.a)()).firstChild;e.dataset.index=t*this.slider.slidesToScroll,this._pages.push(e),this._ui.container.appendChild(e)}this._bindEvents()}}},{key:"onPageClick",value:function(t){this._supportsPassive||t.preventDefault(),this.slider.state.next=t.currentTarget.dataset.index,this.slider.show()}},{key:"onResize",value:function(){this._draw()}},{key:"refresh",value:function(){var e=this,t=void 0;(t=this.slider.options.infinite?Math.ceil(this.slider.state.length-1/this.slider.slidesToScroll):Math.ceil((this.slider.state.length-this.slider.slidesToShow)/this.slider.slidesToScroll))!==this._count&&(this._count=t,this._draw()),this._pages.forEach(function(t){t.classList.remove("is-active"),parseInt(t.dataset.index,10)===e.slider.state.next%e.slider.state.length&&t.classList.add("is-active")})}},{key:"render",value:function(){return this.node}}]),e}();e.a=a},function(t,e,i){"use strict";e.a=function(){return'
<div class="slider-pagination"></div>'}},function(t,e,i){"use strict";e.a=function(){return'<div class="slider-page"></div>
'}},function(t,e,i){"use strict";var n=i(4),s=i(1),r=function(){function n(t,e){for(var i=0;iMath.abs(this._lastTranslate.y)&&(this._supportsPassive||t.preventDefault(),t.stopPropagation())}}},{key:"onStopDrag",value:function(t){this._origin&&this._lastTranslate&&(Math.abs(this._lastTranslate.x)>.2*this.width?this._lastTranslate.x<0?this.slider.next():this.slider.previous():this.slider.show(!0)),this._origin=null,this._lastTranslate=null}}]),e}();e.a=o},function(t,e,i){"use strict";var n=i(20),s=i(21),r=function(){function n(t,e){for(var i=0;it.x?(s.x=0,this.slider.state.next=0):this.options.vertical&&Math.abs(this._position.y)>t.y&&(s.y=0,this.slider.state.next=0)),this._position.x=s.x,this._position.y=s.y,this.options.centerMode&&(this._position.x=this._position.x+this.slider.wrapperWidth/2-Object(o.e)(i)/2),"rtl"===this.slider.direction&&(this._position.x=-this._position.x,this._position.y=-this._position.y),this.slider.container.style.transform="translate3d("+this._position.x+"px, "+this._position.y+"px, 0)",n.x>t.x&&this.slider.transitioner.end()}}},{key:"onTransitionEnd",value:function(t){"translate"===this.options.effect&&(this.transitioner.isAnimating()&&t.target==this.slider.container&&this.options.infinite&&this.slider._infinite.onTransitionEnd(t),this.transitioner.end())}}]),n}();e.a=n},function(t,e,i){"use strict";e.a={initialSlide:0,slidesToScroll:1,slidesToShow:1,navigation:!0,navigationKeys:!0,navigationSwipe:!0,pagination:!0,loop:!1,infinite:!1,effect:"translate",duration:300,timing:"ease",autoplay:!1,autoplaySpeed:3e3,pauseOnHover:!0,breakpoints:[{changePoint:480,slidesToShow:1,slidesToScroll:1},{changePoint:640,slidesToShow:2,slidesToScroll:2},{changePoint:768,slidesToShow:3,slidesToScroll:3}],onReady:null,icons:{previous:'\n \n ',next:'\n \n '}}},function(t,e,i){"use strict";e.a=function(t){return'
<div id="'+t+'" class="slider">\n<div class="slider-container"></div>\n</div>'}},function(t,e,i){"use strict";e.a=function(){return'<div class="slider-item"></div>
'}}]).default}); \ No newline at end of file diff --git a/spaces/Mahiruoshi/MyGO_VIts-bert/text/cleaner.py b/spaces/Mahiruoshi/MyGO_VIts-bert/text/cleaner.py deleted file mode 100644 index 3ba3739816aabbe16663b68c74fcda0588c14bab..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/MyGO_VIts-bert/text/cleaner.py +++ /dev/null @@ -1,28 +0,0 @@ -from text import chinese, japanese, cleaned_text_to_sequence - - -language_module_map = {"ZH": chinese, "JP": japanese} - - -def clean_text(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - return norm_text, phones, tones, word2ph - - -def clean_text_bert(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - bert = language_module.get_bert_feature(norm_text, word2ph) - return phones, tones, bert - - -def text_to_sequence(text, language): - norm_text, phones, tones, word2ph = clean_text(text, language) - return cleaned_text_to_sequence(phones, tones, language) - - -if __name__ == "__main__": - pass diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/models/GroundingDINO/backbone/position_encoding.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/models/GroundingDINO/backbone/position_encoding.py deleted file mode 100644 index eac7e896bbe85a670824bfe8ef487d0535d5bd99..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/models/GroundingDINO/backbone/position_encoding.py +++ /dev/null @@ -1,186 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# DINO -# Copyright (c) 2022 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Conditional DETR -# Copyright (c) 2021 Microsoft. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Copied from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# ------------------------------------------------------------------------ - -""" -Various positional encodings for the transformer. -""" -import math - -import torch -from torch import nn - -from groundingdino.util.misc import NestedTensor - - -class PositionEmbeddingSine(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. 
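-
-    With normalize=True, each cumulative coordinate is rescaled to [0, 2*pi] and
-    projected onto num_pos_feats sin/cos channels per axis whose wavelengths grow
-    geometrically with the channel index: channel 2i carries
-    sin(pos / temperature**(2i / num_pos_feats)) and channel 2i+1 the matching
-    cosine. A hypothetical usage sketch (assumes a NestedTensor `nt` wrapping a
-    padded image batch and its mask, as in groundingdino.util.misc):
-
-        pe = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
-        pos = pe(nt)  # (batch, 2 * num_pos_feats, H, W): y-channels, then x-channels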
- """ - - def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperature = temperature - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - mask = tensor_list.mask - assert mask is not None - not_mask = ~mask - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - if self.normalize: - eps = 1e-6 - # if os.environ.get("SHILONG_AMP", None) == '1': - # eps = 1e-4 - # else: - # eps = 1e-6 - y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) - - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack( - (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos_y = torch.stack( - (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos - - -class PositionEmbeddingSineHW(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. - """ - - def __init__( - self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None - ): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperatureH = temperatureH - self.temperatureW = temperatureW - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - mask = tensor_list.mask - assert mask is not None - not_mask = ~mask - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - - # import ipdb; ipdb.set_trace() - - if self.normalize: - eps = 1e-6 - y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale - - dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_tx = self.temperatureW ** (2 * (torch.div(dim_tx, 2, rounding_mode='floor')) / self.num_pos_feats) - pos_x = x_embed[:, :, :, None] / dim_tx - - dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_ty = self.temperatureH ** (2 * (torch.div(dim_ty, 2, rounding_mode='floor')) / self.num_pos_feats) - pos_y = y_embed[:, :, :, None] / dim_ty - - pos_x = torch.stack( - (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos_y = torch.stack( - (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - - # import ipdb; ipdb.set_trace() - - return pos - - -class PositionEmbeddingLearned(nn.Module): - """ - Absolute pos embedding, learned. 
- """ - - def __init__(self, num_pos_feats=256): - super().__init__() - self.row_embed = nn.Embedding(50, num_pos_feats) - self.col_embed = nn.Embedding(50, num_pos_feats) - self.reset_parameters() - - def reset_parameters(self): - nn.init.uniform_(self.row_embed.weight) - nn.init.uniform_(self.col_embed.weight) - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - h, w = x.shape[-2:] - i = torch.arange(w, device=x.device) - j = torch.arange(h, device=x.device) - x_emb = self.col_embed(i) - y_emb = self.row_embed(j) - pos = ( - torch.cat( - [ - x_emb.unsqueeze(0).repeat(h, 1, 1), - y_emb.unsqueeze(1).repeat(1, w, 1), - ], - dim=-1, - ) - .permute(2, 0, 1) - .unsqueeze(0) - .repeat(x.shape[0], 1, 1, 1) - ) - return pos - - -def build_position_encoding(args): - N_steps = args.hidden_dim // 2 - if args.position_embedding in ("v2", "sine"): - # TODO find a better way of exposing other arguments - position_embedding = PositionEmbeddingSineHW( - N_steps, - temperatureH=args.pe_temperatureH, - temperatureW=args.pe_temperatureW, - normalize=True, - ) - elif args.position_embedding in ("v3", "learned"): - position_embedding = PositionEmbeddingLearned(N_steps) - else: - raise ValueError(f"not supported {args.position_embedding}") - - return position_embedding diff --git a/spaces/Makiing/coolb-in-gtest/src/components/markdown.tsx b/spaces/Makiing/coolb-in-gtest/src/components/markdown.tsx deleted file mode 100644 index d4491467a1f14d1d72e535caac9c40636054e5df..0000000000000000000000000000000000000000 --- a/spaces/Makiing/coolb-in-gtest/src/components/markdown.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { FC, memo } from 'react' -import ReactMarkdown, { Options } from 'react-markdown' - -export const MemoizedReactMarkdown: FC = memo( - ReactMarkdown, - (prevProps, nextProps) => - prevProps.children === nextProps.children && - prevProps.className === nextProps.className -) diff --git a/spaces/Mecca/whisper-webui/app-network.py b/spaces/Mecca/whisper-webui/app-network.py deleted file mode 100644 index 4f0e565b9029761d4b995fe32a65c58d1de55f53..0000000000000000000000000000000000000000 --- a/spaces/Mecca/whisper-webui/app-network.py +++ /dev/null @@ -1,5 +0,0 @@ -# Run the app with no audio file restrictions, and make it available on the network -from app import create_ui -from src.config import ApplicationConfig - -create_ui(ApplicationConfig.create_default(input_audio_max_duration=-1, server_name="0.0.0.0")) \ No newline at end of file diff --git a/spaces/MehdiAmirate/Botv2/README.md b/spaces/MehdiAmirate/Botv2/README.md deleted file mode 100644 index 1776fa615c1586134e8e3b67c1169777990b1f89..0000000000000000000000000000000000000000 --- a/spaces/MehdiAmirate/Botv2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ChatBot -emoji: 💻 -colorFrom: blue -colorTo: yellow -sdk: docker -pinned: false -license: cc-by-nc-2.0 -duplicated_from: MehdiAmirate/chatBot ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/MiloSobral/PortiloopDemo/portiloop/src/capture.py b/spaces/MiloSobral/PortiloopDemo/portiloop/src/capture.py deleted file mode 100644 index 57f76370a13f0af35c87e6d1fdacb8ced1ce8568..0000000000000000000000000000000000000000 --- a/spaces/MiloSobral/PortiloopDemo/portiloop/src/capture.py +++ /dev/null @@ -1,1096 +0,0 @@ - - -from time import sleep -import time -import numpy as np -from copy import deepcopy -from datetime import datetime -import multiprocessing as mp -import warnings -from threading import 
Thread, Lock -from portiloop.src import ADS - -if ADS: - import alsaaudio - from portiloop.src.hardware.frontend import Frontend - from portiloop.src.hardware.leds import LEDs, Color - -from portiloop.src.stimulation import UpStateDelayer - - -from portiloop.src.processing import FilterPipeline, int_to_float -from portiloop.src.config import mod_config, LEADOFF_CONFIG, FRONTEND_CONFIG, to_ads_frequency -from portiloop.src.utils import FileReader, LiveDisplay, DummyAlsaMixer, EDFRecorder, EDF_PATH, RECORDING_PATH -from IPython.display import clear_output, display -import ipywidgets as widgets - - -def capture_process(p_data_o, p_msg_io, duration, frequency, python_clock, time_msg_in, channel_states): - """ - Args: - p_data_o: multiprocessing.Pipe: captured datapoints are put here - p_msg_io: mutliprocessing.Pipe: to communicate with the parent process - duration: float: max duration of the experiment in seconds - frequency: float: sampling frequency - ptyhon_clock: bool: if True, the Coral clock is used, otherwise, the ADS interrupts are used - time_msg_in: float: min time between attempts to recv incomming messages - """ - if duration <= 0: - duration = np.inf - - sample_time = 1 / frequency - - frontend = Frontend() - leds = LEDs() - leds.led2(Color.PURPLE) - leds.aquisition(True) - - try: - data = frontend.read_regs(0x00, 1) - assert data == [0x3E], "The communication with the ADS failed, please try again." - leds.led2(Color.BLUE) - - config = FRONTEND_CONFIG - if python_clock: # set ADS to 2 * frequency - datarate = 2 * frequency - else: # set ADS to frequency - datarate = frequency - config = mod_config(config, datarate, channel_states) - - frontend.write_regs(0x00, config) - data = frontend.read_regs(0x00, len(config)) - assert data == config, f"Wrong config: {data} vs {config}" - frontend.start() - leds.led2(Color.PURPLE) - while not frontend.is_ready(): - pass - - # Set up of leds - leds.aquisition(True) - sleep(0.5) - leds.aquisition(False) - sleep(0.5) - leds.aquisition(True) - - c = True - - it = 0 - t_start = time.time() - t_max = t_start + duration - t = t_start - - # first sample: - reading = frontend.read() - datapoint = reading.channels() - p_data_o.send(datapoint) - - t_next = t + sample_time - t_chk_msg = t + time_msg_in - - # sampling loop: - while c and t < t_max: - t = time.time() - if python_clock: - if t <= t_next: - time.sleep(t_next - t) - t_next += sample_time - reading = frontend.read() - else: - reading = frontend.wait_new_data() - datapoint = reading.channels() - p_data_o.send(datapoint) - - # Check for messages - if t >= t_chk_msg: - t_chk_msg = t + time_msg_in - if p_msg_io.poll(): - message = p_msg_io.recv() - if message == 'STOP': - c = False - it += 1 - t = time.time() - tot = (t - t_start) / it - - p_msg_io.send(("PRT", f"Average frequency: {1 / tot} Hz for {it} samples")) - - finally: - leds.aquisition(False) - leds.close() - frontend.close() - p_msg_io.send('STOP') - p_msg_io.close() - p_data_o.close() - - - -class Capture: - def __init__(self, detector_cls=None, stimulator_cls=None): - # {now.strftime('%m_%d_%Y_%H_%M_%S')} - self.filename = EDF_PATH / 'recording.edf' - self._p_capture = None - self.__capture_on = False - self.frequency = 250 - self.duration = 28800 - self.power_line = 60 - self.polyak_mean = 0.1 - self.polyak_std = 0.001 - self.epsilon = 0.000001 - self.custom_fir = False - self.custom_fir_order = 20 - self.custom_fir_cutoff = 30 - self.filter = True - self.filter_args = [True, True, True] - self.record = False - self.detect = False - 
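-        # Runtime toggles for the optional online pipeline: `detect` gates the
-        # detector and `stimulate` gates whether detections drive the stimulator.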
self.stimulate = False - self.threshold = 0.82 - self.lsl = False - self.display = False - self.signal_input = "ADS" - self.python_clock = True - self.edf_writer = None - self.edf_buffer = [] - self.signal_labels = ['Common Mode', 'ch2', 'ch3', 'ch4', 'ch5', 'ch6', 'ch7', 'ch8'] - self._lock_msg_out = Lock() - self._msg_out = None - self._t_capture = None - self.channel_states = ['disabled', 'disabled', 'disabled', 'disabled', 'disabled', 'disabled', 'disabled'] - self.channel_detection = 2 - self.spindle_detection_mode = 'Fast' - self.spindle_freq = 10 - - self.detector_cls = detector_cls - self.stimulator_cls = stimulator_cls - - self._test_stimulus_lock = Lock() - self._test_stimulus = False - - self._pause_detect_lock = Lock() - self._pause_detect = True - - if ADS: - try: - mixers = alsaaudio.mixers() - if len(mixers) <= 0: - warnings.warn(f"No ALSA mixer found.") - self.mixer = DummyAlsaMixer() - elif 'PCM' in mixers: - self.mixer = alsaaudio.Mixer(control='PCM') - else: - warnings.warn(f"Could not find mixer PCM, using {mixers[0]} instead.") - self.mixer = alsaaudio.Mixer(control=mixers[0]) - except ALSAAudioError as e: - warnings.warn(f"No ALSA mixer found.") - self.mixer = DummyAlsaMixer() - - self.volume = self.mixer.getvolume()[0] # we will set the same volume on all channels - else: - self.mixer = DummyAlsaMixer() - self.volume = self.mixer.getvolume()[0] - - # widgets =============================== - - # CHANNELS ------------------------------ - -# self.b_radio_ch1 = widgets.RadioButtons( -# options=['disabled', 'simple'], -# value='disabled', -# disabled=True -# ) - - self.b_radio_ch2 = widgets.RadioButtons( - options=['disabled', 'simple'], - value='disabled', - disabled=False - ) - - self.b_radio_ch3 = widgets.RadioButtons( - options=['disabled', 'simple'], - value='disabled', - disabled=False - ) - - self.b_radio_ch4 = widgets.RadioButtons( - options=['disabled', 'simple'], - value='disabled', - disabled=False - ) - - self.b_radio_ch5 = widgets.RadioButtons( - options=['disabled', 'simple'], - value='disabled', - disabled=False - ) - - self.b_radio_ch6 = widgets.RadioButtons( - options=['disabled', 'simple'], - value='disabled', - disabled=False - ) - - self.b_radio_ch7 = widgets.RadioButtons( - options=['disabled', 'simple'], - value='disabled', - disabled=False - ) - - self.b_radio_ch8 = widgets.RadioButtons( - options=['disabled', 'simple'], - value='disabled', - disabled=False - ) - - self.b_channel_detect = widgets.Dropdown( - options=[('2', 2), ('3', 3), ('4', 4), ('5', 5), ('6', 6), ('7', 7), ('8', 8)], - value=2, - description='Detection Channel:', - disabled=False, - style={'description_width': 'initial'} - ) - - self.b_spindle_mode = widgets.Dropdown( - options=['Fast', 'Peak', 'Through'], - value='Fast', - description='Spindle Stimulation Mode', - disabled=False, - style={'description_width': 'initial'} - ) - - self.b_spindle_freq = widgets.IntText( - value=self.spindle_freq, - description='Spindle Freq (Hz):', - disabled=False, - style={'description_width': 'initial'} - ) - - self.b_accordion_channels = widgets.Accordion( - children=[ - widgets.GridBox([ - widgets.Label('CH2'), - widgets.Label('CH3'), - widgets.Label('CH4'), - widgets.Label('CH5'), - widgets.Label('CH6'), - widgets.Label('CH7'), - widgets.Label('CH8'), - self.b_radio_ch2, - self.b_radio_ch3, - self.b_radio_ch4, - self.b_radio_ch5, - self.b_radio_ch6, - self.b_radio_ch7, - self.b_radio_ch8], layout=widgets.Layout(grid_template_columns="repeat(7, 90px)") - ) - ]) - 
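-        # Title the accordion section that groups the per-channel mode selectors (CH2-CH8).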
self.b_accordion_channels.set_title(index = 0, title = 'Channels') - - # OTHERS ------------------------------ - - self.b_capture = widgets.ToggleButtons( - options=['Stop', 'Start'], - description='Capture:', - disabled=False, - button_style='', # 'success', 'info', 'warning', 'danger' or '' - tooltips=['Stop capture', 'Start capture'], - ) - - self.b_pause = widgets.ToggleButtons( - options=['Paused', 'Active'], - description='Detection', - disabled=True, - button_style='', # 'success', 'info', 'warning', 'danger' or '' - tooltips=['Detector and stimulator active', 'Detector and stimulator paused'], - ) - - self.b_clock = widgets.ToggleButtons( - options=['ADS', 'Coral'], - description='Clock:', - disabled=False, - button_style='', # 'success', 'info', 'warning', 'danger' or '' - tooltips=['Use Coral clock (very precise, not very timely)', - 'Use ADS clock (not very precise, very timely)'], - ) - - self.b_power_line = widgets.ToggleButtons( - options=['60 Hz', '50 Hz'], - description='Power line:', - disabled=False, - button_style='', # 'success', 'info', 'warning', 'danger' or '' - tooltips=['North America 60 Hz', - 'Europe 50 Hz'], - ) - - self.b_signal_input = widgets.ToggleButtons( - options=['ADS', 'File'], - description='Signal Input:', - disabled=False, - button_style='', # 'success', 'info', 'warning', 'danger' or '' - tooltips=['Read data from ADS.', - 'Read data from file.'], - ) - - self.b_custom_fir = widgets.ToggleButtons( - options=['Default', 'Custom'], - description='FIR filter:', - disabled=False, - button_style='', # 'success', 'info', 'warning', 'danger' or '' - tooltips=['Use the default 30Hz low-pass FIR from the Portiloop paper', - 'Use a custom FIR'], - ) - - self.b_filename = widgets.Text( - value='recording.edf', - description='Recording:', - disabled=False - ) - - self.b_frequency = widgets.IntText( - value=self.frequency, - description='Freq (Hz):', - disabled=False - ) - - self.b_threshold = widgets.FloatText( - value=self.threshold, - description='Threshold:', - disabled=True - ) - - self.b_polyak_mean = widgets.FloatText( - value=self.polyak_mean, - description='Polyak mean:', - disabled=False - ) - - self.b_polyak_std = widgets.FloatText( - value=self.polyak_std, - description='Polyak std:', - disabled=False - ) - - self.b_epsilon = widgets.FloatText( - value=self.epsilon, - description='Epsilon:', - disabled=False - ) - - self.b_custom_fir_order = widgets.IntText( - value=self.custom_fir_order, - description='FIR order:', - disabled=True - ) - - self.b_custom_fir_cutoff = widgets.IntText( - value=self.custom_fir_cutoff, - description='FIR cutoff:', - disabled=True - ) - - self.b_use_fir = widgets.Checkbox( - value=self.filter_args[0], - description='Use FIR', - disabled=False, - indent=False - ) - - self.b_use_notch = widgets.Checkbox( - value=self.filter_args[1], - description='Use notch', - disabled=False, - indent=False - ) - - self.b_use_std = widgets.Checkbox( - value=self.filter_args[2], - description='Use standardization', - disabled=False, - indent=False - ) - - self.b_accordion_filter = widgets.Accordion( - children=[ - widgets.VBox([ - self.b_custom_fir, - self.b_custom_fir_order, - self.b_custom_fir_cutoff, - self.b_polyak_mean, - self.b_polyak_std, - self.b_epsilon, - widgets.HBox([ - self.b_use_fir, - self.b_use_notch, - self.b_use_std - ]) - ]) - ]) - self.b_accordion_filter.set_title(index = 0, title = 'Filtering') - - self.b_duration = widgets.IntText( - value=self.duration, - description='Time (s):', - disabled=False - ) - - self.b_filter 
= widgets.Checkbox( - value=self.filter, - description='Filter', - disabled=False, - indent=False - ) - - self.b_detect = widgets.Checkbox( - value=self.detect, - description='Detect', - disabled=False, - indent=False - ) - - self.b_stimulate = widgets.Checkbox( - value=self.stimulate, - description='Stimulate', - disabled=True, - indent=False - ) - - self.b_record = widgets.Checkbox( - value=self.record, - description='Record EDF', - disabled=False, - indent=False - ) - - self.b_lsl = widgets.Checkbox( - value=self.lsl, - description='Stream LSL', - disabled=False, - indent=False - ) - - self.b_display = widgets.Checkbox( - value=self.display, - description='Display', - disabled=False, - indent=False - ) - - self.b_volume = widgets.IntSlider( - value=self.volume, - min=0, - max=100, - step=1, - description="Volume", - disabled=False - ) - - self.b_test_stimulus = widgets.Button( - description='Test stimulus', - disabled=True, - button_style='', # 'success', 'info', 'warning', 'danger' or '' - tooltip='Send a test stimulus' - ) - - self.b_test_impedance = widgets.Button( - description='Impedance Check', - disabled=False, - button_style='', # 'success', 'info', 'warning', 'danger' or '' - tooltip='Check if electrodes are properly connected' - ) - - # CALLBACKS ---------------------- - - self.b_capture.observe(self.on_b_capture, 'value') - self.b_clock.observe(self.on_b_clock, 'value') - self.b_signal_input.observe(self.on_b_signal_input, 'value') - self.b_frequency.observe(self.on_b_frequency, 'value') - self.b_threshold.observe(self.on_b_threshold, 'value') - self.b_duration.observe(self.on_b_duration, 'value') - self.b_filter.observe(self.on_b_filter, 'value') - self.b_use_fir.observe(self.on_b_use_fir, 'value') - self.b_use_notch.observe(self.on_b_use_notch, 'value') - self.b_use_std.observe(self.on_b_use_std, 'value') - self.b_detect.observe(self.on_b_detect, 'value') - self.b_stimulate.observe(self.on_b_stimulate, 'value') - self.b_record.observe(self.on_b_record, 'value') - self.b_lsl.observe(self.on_b_lsl, 'value') - self.b_display.observe(self.on_b_display, 'value') - self.b_filename.observe(self.on_b_filename, 'value') - self.b_radio_ch2.observe(self.on_b_radio_ch2, 'value') - self.b_radio_ch3.observe(self.on_b_radio_ch3, 'value') - self.b_radio_ch4.observe(self.on_b_radio_ch4, 'value') - self.b_radio_ch5.observe(self.on_b_radio_ch5, 'value') - self.b_radio_ch6.observe(self.on_b_radio_ch6, 'value') - self.b_radio_ch7.observe(self.on_b_radio_ch7, 'value') - self.b_radio_ch8.observe(self.on_b_radio_ch8, 'value') - self.b_channel_detect.observe(self.on_b_channel_detect, 'value') - self.b_spindle_mode.observe(self.on_b_spindle_mode, 'value') - self.b_spindle_freq.observe(self.on_b_spindle_freq, 'value') - self.b_power_line.observe(self.on_b_power_line, 'value') - self.b_signal_input.observe(self.on_b_power_line, 'value') - self.b_custom_fir.observe(self.on_b_custom_fir, 'value') - self.b_custom_fir_order.observe(self.on_b_custom_fir_order, 'value') - self.b_custom_fir_cutoff.observe(self.on_b_custom_fir_cutoff, 'value') - self.b_polyak_mean.observe(self.on_b_polyak_mean, 'value') - self.b_polyak_std.observe(self.on_b_polyak_std, 'value') - self.b_epsilon.observe(self.on_b_epsilon, 'value') - self.b_volume.observe(self.on_b_volume, 'value') - self.b_test_stimulus.on_click(self.on_b_test_stimulus) - self.b_test_impedance.on_click(self.on_b_test_impedance) - self.b_pause.observe(self.on_b_pause, 'value') - - self.display_buttons() - - - def __del__(self): - self.b_capture.close() - - def 
display_buttons(self): - display(widgets.VBox([self.b_accordion_channels, - self.b_channel_detect, - self.b_frequency, - self.b_duration, - self.b_filename, - self.b_signal_input, - self.b_power_line, - self.b_clock, - widgets.HBox([self.b_filter, self.b_detect, self.b_stimulate, self.b_record, self.b_lsl, self.b_display]), - widgets.HBox([self.b_threshold, self.b_test_stimulus]), - self.b_volume, - widgets.HBox([self.b_spindle_mode, self.b_spindle_freq]), - self.b_test_impedance, - self.b_accordion_filter, - self.b_capture, - self.b_pause])) - - def enable_buttons(self): - self.b_frequency.disabled = False - self.b_duration.disabled = False - self.b_filename.disabled = False - self.b_filter.disabled = False - self.b_detect.disabled = False - self.b_record.disabled = False - self.b_lsl.disabled = False - self.b_display.disabled = False - self.b_clock.disabled = False - self.b_radio_ch2.disabled = False - self.b_radio_ch3.disabled = False - self.b_radio_ch4.disabled = False - self.b_radio_ch5.disabled = False - self.b_radio_ch6.disabled = False - self.b_radio_ch7.disabled = False - self.b_radio_ch8.disabled = False - self.b_power_line.disabled = False - self.b_signal_input.disabled = False - self.b_channel_detect.disabled = False - self.b_spindle_freq.disabled = False - self.b_spindle_mode.disabled = False - self.b_polyak_mean.disabled = False - self.b_polyak_std.disabled = False - self.b_epsilon.disabled = False - self.b_use_fir.disabled = False - self.b_use_notch.disabled = False - self.b_use_std.disabled = False - self.b_custom_fir.disabled = False - self.b_custom_fir_order.disabled = not self.custom_fir - self.b_custom_fir_cutoff.disabled = not self.custom_fir - self.b_stimulate.disabled = not self.detect - self.b_threshold.disabled = not self.detect - self.b_pause.disabled = not self.detect - self.b_test_stimulus.disabled = True # only enabled when running - self.b_test_impedance.disabled = False - - def disable_buttons(self): - self.b_frequency.disabled = True - self.b_duration.disabled = True - self.b_filename.disabled = True - self.b_filter.disabled = True - self.b_stimulate.disabled = True - self.b_filter.disabled = True - self.b_detect.disabled = True - self.b_record.disabled = True - self.b_lsl.disabled = True - self.b_display.disabled = True - self.b_clock.disabled = True - self.b_radio_ch2.disabled = True - self.b_radio_ch3.disabled = True - self.b_radio_ch4.disabled = True - self.b_radio_ch5.disabled = True - self.b_radio_ch6.disabled = True - self.b_radio_ch7.disabled = True - self.b_radio_ch8.disabled = True - self.b_channel_detect.disabled = True - self.b_spindle_freq.disabled = True - self.b_spindle_mode.disabled = True - self.b_signal_input.disabled = True - self.b_power_line.disabled = True - self.b_polyak_mean.disabled = True - self.b_polyak_std.disabled = True - self.b_epsilon.disabled = True - self.b_use_fir.disabled = True - self.b_use_notch.disabled = True - self.b_use_std.disabled = True - self.b_custom_fir.disabled = True - self.b_custom_fir_order.disabled = True - self.b_custom_fir_cutoff.disabled = True - self.b_threshold.disabled = True - self.b_test_stimulus.disabled = not self.stimulate # only enabled when running - self.b_test_impedance.disabled = True - - def on_b_radio_ch2(self, value): - self.channel_states[0] = value['new'] - - def on_b_radio_ch3(self, value): - self.channel_states[1] = value['new'] - - def on_b_radio_ch4(self, value): - self.channel_states[2] = value['new'] - - def on_b_radio_ch5(self, value): - self.channel_states[3] = value['new'] - - 
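    # The radio-button callbacks defined here (on_b_radio_ch2 through
    # on_b_radio_ch8) all follow the same ipywidgets pattern: observe(handler,
    # 'value') registers the handler, and each change event arrives as a dict
    # whose 'new' key carries the updated value. A minimal, self-contained
    # sketch of that pattern follows (widget names and options are
    # illustrative, not taken from the original code); a closure factory
    # replaces the seven hand-written methods and sidesteps Python's
    # late-binding pitfall in loops:
    #
    # import ipywidgets as widgets
    # from IPython.display import display
    #
    # channel_states = ['simple'] * 7  # one entry per configurable channel
    #
    # def make_channel_handler(index):
    #     # Bind `index` now, not at call time, so each button updates its own slot.
    #     def on_change(change):
    #         channel_states[index] = change['new']
    #     return on_change
    #
    # buttons = []
    # for i in range(7):
    #     b = widgets.RadioButtons(options=['disabled', 'simple'], description=f'CH{i + 2}')
    #     b.observe(make_channel_handler(i), 'value')
    #     buttons.append(b)
    #
    # display(widgets.VBox(buttons))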
def on_b_radio_ch6(self, value): - self.channel_states[4] = value['new'] - - def on_b_radio_ch7(self, value): - self.channel_states[5] = value['new'] - - def on_b_radio_ch8(self, value): - self.channel_states[6] = value['new'] - - def on_b_channel_detect(self, value): - self.channel_detection = value['new'] - - def on_b_spindle_freq(self, value): - val = value['new'] - if val > 0: - self.spindle_freq = val - else: - self.b_spindle_freq.value = self.spindle_freq - - def on_b_spindle_mode(self, value): - self.spindle_detection_mode = value['new'] - - def on_b_capture(self, value): - val = value['new'] - if val == 'Start': - clear_output() - self.disable_buttons() - if not self.python_clock: # ADS clock: force the frequency to an ADS-compatible frequency - self.frequency = to_ads_frequency(self.frequency) - self.b_frequency.value = self.frequency - self.display_buttons() - with self._lock_msg_out: - self._msg_out = None - if self._t_capture is not None: - warnings.warn("Capture already running, operation aborted.") - return - detector_cls = self.detector_cls if self.detect else None - stimulator_cls = self.stimulator_cls if self.stimulate else None - - self._t_capture = Thread(target=self.start_capture, - args=(self.filter, - self.filter_args, - detector_cls, - self.threshold, - self.channel_detection, - stimulator_cls, - self.record, - self.lsl, - self.display, - 2500, - self.python_clock)) - self._t_capture.start() - elif val == 'Stop': - with self._lock_msg_out: - self._msg_out = 'STOP' - assert self._t_capture is not None - self._t_capture.join() - self._t_capture = None - self.enable_buttons() - - def on_b_custom_fir(self, value): - val = value['new'] - if val == 'Default': - self.custom_fir = False - elif val == 'Custom': - self.custom_fir = True - self.enable_buttons() - - def on_b_clock(self, value): - val = value['new'] - if val == 'Coral': - self.python_clock = True - elif val == 'ADS': - self.python_clock = False - - def on_b_signal_input(self, value): - val = value['new'] - if val == "ADS": - self.signal_input = "ADS" - elif val == "File": - self.signal_input = "File" - - def on_b_power_line(self, value): - val = value['new'] - if val == '60 Hz': - self.power_line = 60 - elif val == '50 Hz': - self.power_line = 50 - - def on_b_frequency(self, value): - val = value['new'] - if val > 0: - self.frequency = val - else: - self.b_frequency.value = self.frequency - - def on_b_threshold(self, value): - val = value['new'] - if val >= 0 and val <= 1: - self.threshold = val - else: - self.b_threshold.value = self.threshold - - def on_b_filename(self, value): - val = value['new'] - if val != '': - if not val.endswith('.edf'): - val += '.edf' - self.filename = EDF_PATH / val - else: - now = datetime.now() - self.filename = EDF_PATH / 'recording.edf' - - def on_b_duration(self, value): - val = value['new'] - if val > 0: - self.duration = val - - def on_b_custom_fir_order(self, value): - val = value['new'] - if val > 0: - self.custom_fir_order = val - else: - self.b_custom_fir_order.value = self.custom_fir_order - - def on_b_custom_fir_cutoff(self, value): - val = value['new'] - if val > 0 and val < self.frequency / 2: - self.custom_fir_cutoff = val - else: - self.b_custom_fir_cutoff.value = self.custom_fir_cutoff - - def on_b_polyak_mean(self, value): - val = value['new'] - if val >= 0 and val <= 1: - self.polyak_mean = val - else: - self.b_polyak_mean.value = self.polyak_mean - - def on_b_polyak_std(self, value): - val = value['new'] - if val >= 0 and val <= 1: - self.polyak_std = val - else: 
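            # Clamp-by-reset: an out-of-range edit is rejected by writing the
            # last valid value back into the widget, so the field visibly
            # snaps back. The other numeric callbacks in this class use the
            # same pattern.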
- self.b_polyak_std.value = self.polyak_std - - def on_b_epsilon(self, value): - val = value['new'] - if val > 0 and val < 0.1: - self.epsilon = val - else: - self.b_epsilon.value = self.epsilon - - def on_b_filter(self, value): - val = value['new'] - self.filter = val - - def on_b_use_fir(self, value): - val = value['new'] - self.filter_args[0] = val - - def on_b_use_notch(self, value): - val = value['new'] - self.filter_args[1] = val - - def on_b_use_std(self, value): - val = value['new'] - self.filter_args[2] = val - - def on_b_stimulate(self, value): - val = value['new'] - self.stimulate = val - - def on_b_detect(self, value): - val = value['new'] - self.detect = val - self.enable_buttons() - - def on_b_record(self, value): - val = value['new'] - self.record = val - - def on_b_lsl(self, value): - val = value['new'] - self.lsl = val - - def on_b_display(self, value): - val = value['new'] - self.display = val - - def on_b_volume(self, value): - val = value['new'] - if val >= 0 and val <= 100: - self.volume = val - self.mixer.setvolume(self.volume) - - def on_b_test_stimulus(self, b): - with self._test_stimulus_lock: - self._test_stimulus = True - - def on_b_test_impedance(self, b): - frontend = Frontend() - - def is_set(x, n): - return x & 1 << n != 0 - - try: - frontend.write_regs(0x00, LEADOFF_CONFIG) - frontend.start() - start_time = time.time() - current_time = time.time() - while current_time - start_time < 2: - current_time = time.time() - reading = frontend.read() - - # Check if any of the negative bits are set and initialize the impedance array -# impedance_check = [any([is_set(leadoff_n, i) for i in range(2, 9)])] - impedance_check = [any([reading.loff_n(i) for i in range(7)])] - - for i in range(7): - impedance_check.append(reading.loff_p(i)) - - def print_impedance(impedance): - names = ["Ref", "Ch2", "Ch3", "Ch4", "Ch5", "Ch6", "Ch7", "Ch8"] - vals = [' Y ' if val else ' N ' for val in impedance] - print(' '.join(str(name) for name in names)) - print(' '.join(str(val) for val in vals)) - - print_impedance(impedance_check) - - finally: - frontend.close() - - def on_b_pause(self, value): - val = value['new'] - if val == 'Active': - with self._pause_detect_lock: - self._pause_detect = False - elif val == 'Paused': - with self._pause_detect_lock: - self._pause_detect = True - - def start_capture(self, - filter, - filter_args, - detector_cls, - threshold, - channel, - stimulator_cls, - record, - lsl, - viz, - width, - python_clock): - - if self.signal_input == "ADS": - if self.__capture_on: - warnings.warn("Capture is already ongoing, ignoring command.") - return - else: - self.__capture_on = True - p_msg_io, p_msg_io_2 = mp.Pipe() - p_data_i, p_data_o = mp.Pipe(duplex=False) - else: - p_msg_io, _ = mp.Pipe() - - # Initialize filtering pipeline - if filter: - fp = FilterPipeline(nb_channels=8, - sampling_rate=self.frequency, - power_line_fq=self.power_line, - use_custom_fir=self.custom_fir, - custom_fir_order=self.custom_fir_order, - custom_fir_cutoff=self.custom_fir_cutoff, - alpha_avg=self.polyak_mean, - alpha_std=self.polyak_std, - epsilon=self.epsilon, - filter_args=filter_args) - - # Initialize detector and stimulator - detector = detector_cls(threshold, channel=channel) if detector_cls is not None else None - stimulator = stimulator_cls() if stimulator_cls is not None else None - - # Launch the capture process - if self.signal_input == "ADS": - self._p_capture = mp.Process(target=capture_process, - args=(p_data_o, - p_msg_io_2, - self.duration, - self.frequency, - 
python_clock, - 1.0, - self.channel_states) - ) - self._p_capture.start() - print(f"PID capture: {self._p_capture.pid}") - else: - filename = RECORDING_PATH / 'test_recording.csv' - file_reader = FileReader(filename) - - # Initialize display if requested - if viz: - live_disp = LiveDisplay(channel_names = self.signal_labels, window_len=width) - - # Initialize recording if requested - if record: - recorder = EDFRecorder(self.signal_labels, self.filename, self.frequency) - recorder.open_recording_file() - - # Initialize LSL to stream if requested - if lsl: - from pylsl import StreamInfo, StreamOutlet - lsl_info = StreamInfo(name='Portiloop Filtered', - type='Filtered EEG', - channel_count=8, - nominal_srate=self.frequency, - channel_format='float32', - source_id='portiloop1') # TODO: replace this by unique device identifier - lsl_outlet = StreamOutlet(lsl_info) - lsl_info_raw = StreamInfo(name='Portiloop Raw Data', - type='Raw EEG signal', - channel_count=8, - nominal_srate=self.frequency, - channel_format='float32', - source_id='portiloop1') # TODO: replace this by unique device identifier - lsl_outlet_raw = StreamOutlet(lsl_info_raw) - - buffer = [] - - # Initialize stimulation delayer if requested - if not self.spindle_detection_mode == 'Fast' and stimulator is not None: - stimulation_delayer = UpStateDelayer(self.frequency, self.spindle_detection_mode == 'Peak', 0.3) - stimulator.add_delayer(stimulation_delayer) - else: - stimulation_delayer = None - - # Main capture loop - while True: - if self.signal_input == "ADS": - # Send message in communication pipe if we have one - with self._lock_msg_out: - if self._msg_out is not None: - p_msg_io.send(self._msg_out) - self._msg_out = None - - # Check if we have received a message in communication pipe - if p_msg_io.poll(): - mess = p_msg_io.recv() - if mess == 'STOP': - break - elif mess[0] == 'PRT': - print(mess[1]) - - # Retrieve all data points from data pipe p_data - point = None - if p_data_i.poll(timeout=(1 / self.frequency)): - point = p_data_i.recv() - else: - continue - - # Convert point from int to corresponding value in microvolts - n_array_raw = int_to_float(np.array([point])) - elif self.signal_input == "File": - # Check if the message to stop has been sent - with self._lock_msg_out: - if self._msg_out == "STOP": - break - - file_point = file_reader.get_point() - if file_point is None: - break - index, raw_point, off_filtered_point, past_stimulation, lacourse_stimulation = file_point - n_array_raw = np.array([0, raw_point, 0, 0, 0, 0, 0, 0]) - n_array_raw = np.reshape(n_array_raw, (1, 8)) - - # Go through filtering pipeline - if filter: - n_array = fp.filter(deepcopy(n_array_raw)) - else: - n_array = deepcopy(n_array_raw) - - # Contains the filtered point (if filtering is off, contains a copy of the raw point) - filtered_point = n_array.tolist() - - # Send both raw and filtered points over LSL - if lsl: - raw_point = n_array_raw.tolist() - lsl_outlet_raw.push_sample(raw_point[-1]) - lsl_outlet.push_sample(filtered_point[-1]) - - # Adds point to buffer for delayed stimulation - if stimulation_delayer is not None: - stimulation_delayer.step_timesteps(filtered_point[0][channel-1]) - - # Check if detection is on or off - with self._pause_detect_lock: - pause = self._pause_detect - - # If detection is on - if detector is not None and not pause: - # Detect using the latest point - detection_signal = detector.detect(filtered_point) - - # Stimulate - if stimulator is not None: - stimulator.stimulate(detection_signal) - with 
self._test_stimulus_lock: - test_stimulus = self._test_stimulus - self._test_stimulus = False - if test_stimulus: - stimulator.test_stimulus() - - # Send the stimulation from the file reader - if stimulator is not None: - if self.signal_input == "File" and lacourse_stimulation: - stimulator.send_stimulation("GROUND_TRUTH_STIM", False) - - # Add point to the buffer to send to viz and recorder - buffer += filtered_point - if len(buffer) >= 50: - if viz: - live_disp.add_datapoints(buffer) - if record: - recorder.add_recording_data(buffer) - buffer = [] - - if self.signal_input == "ADS": - # Empty pipes - while True: - if p_data_i.poll(): - _ = p_data_i.recv() - elif p_msg_io.poll(): - _ = p_msg_io.recv() - else: - break - - p_data_i.close() - p_msg_io.close() - self._p_capture.join() - self.__capture_on = False - - if record: - recorder.close_recording_file() - - -if __name__ == "__main__": - pass diff --git a/spaces/MingGatsby/Grounding_DINO_demo/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h b/spaces/MingGatsby/Grounding_DINO_demo/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h deleted file mode 100644 index ad1311a78f61303616504eb991aaa9c4a93d9948..0000000000000000000000000000000000000000 --- a/spaces/MingGatsby/Grounding_DINO_demo/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h +++ /dev/null @@ -1,33 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once -#include - -namespace groundingdino { - -at::Tensor ms_deform_attn_cuda_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step); - -std::vector ms_deform_attn_cuda_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step); - -} // namespace groundingdino \ No newline at end of file diff --git a/spaces/NAACL2022/GlobEnc/src/task_loaders.py b/spaces/NAACL2022/GlobEnc/src/task_loaders.py deleted file mode 100644 index c88800c267705fba0384fe03a88e2d993d9f1109..0000000000000000000000000000000000000000 --- a/spaces/NAACL2022/GlobEnc/src/task_loaders.py +++ /dev/null @@ -1,52 +0,0 @@ -from datasets import load_dataset - - -class TaskLoader: - def __init__(self, tokenizer, count=None, max_length=128) -> None: - self.tokenizer = tokenizer - self.count = "" if count is None else count - self.max_length = max_length - - def load_task(self, task): - task_loaders = { - "mnli": self.__load_mnli, - "sst2": self.__load_sst2, - "hatexplain": self.__load_hatexplain - } - return task_loaders[task]() - - def __load_mnli(self): - dataset = load_dataset("glue", "mnli", split=f"validation_mismatched[:{self.count}]") - return ( - dataset, - lambda idx: self.tokenizer.encode_plus(dataset[idx]["premise"], dataset[idx]["hypothesis"], - 
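                                                     # passing premise and hypothesis as two arguments
                                                     # makes the tokenizer encode them as a sentence
                                                     # pair (the model's separator token is inserted
                                                     # between them), which is what an NLI model expects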
return_tensors="pt", - max_length=self.max_length, - truncation=True) - ) - - def __load_sst2(self): - dataset = load_dataset("glue", "sst2", split=f"validation[:{self.count}]") - return ( - dataset, - lambda idx: self.tokenizer.encode_plus(dataset[idx]["sentence"], return_tensors="pt", - max_length=self.max_length, - truncation=True), - ) - - def __load_hatexplain(self): - def mode(lst): - return max(set(lst), key=lst.count) - - def update_data(example): - example["label"] = mode(example["annotators"]["label"]) - example["text"] = " ".join(example["post_tokens"]) - return example - - dataset = load_dataset("hatexplain", split=f"validation[:{self.count}]").map(update_data) - return ( - dataset, - lambda idx: self.tokenizer.encode_plus(dataset[idx]["text"], return_tensors="pt", - max_length=self.max_length, - truncation=True), - ) diff --git a/spaces/NATSpeech/PortaSpeech/mfa_usr/adapt.py b/spaces/NATSpeech/PortaSpeech/mfa_usr/adapt.py deleted file mode 100644 index d1f509b9af8cf53d2b8fc910ac1eb41f441b8054..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/PortaSpeech/mfa_usr/adapt.py +++ /dev/null @@ -1,201 +0,0 @@ -import shutil -import os -import time -from montreal_forced_aligner import __version__ -from montreal_forced_aligner.corpus.align_corpus import AlignableCorpus -from montreal_forced_aligner.dictionary import Dictionary, MultispeakerDictionary -from montreal_forced_aligner.aligner import TrainableAligner, PretrainedAligner -from montreal_forced_aligner.models import AcousticModel -from montreal_forced_aligner.config import TEMP_DIR, align_yaml_to_config, load_basic_align, load_command_configuration, \ - train_yaml_to_config -from montreal_forced_aligner.utils import get_available_acoustic_languages, get_pretrained_acoustic_path, \ - get_available_dict_languages, validate_dictionary_arg -from montreal_forced_aligner.helper import setup_logger, log_config -from montreal_forced_aligner.exceptions import ArgumentError - - -def load_adapt_config(): - training_config, align_config = train_yaml_to_config('mfa_usr/adapt_config.yaml', require_mono=False) - training_config.training_configs[0].fmllr_iterations = list( - range(0, training_config.training_configs[0].num_iterations)) - training_config.training_configs[0].realignment_iterations = list(range(0, training_config.training_configs[ - 0].num_iterations)) - return training_config, align_config - - -class AcousticModel2(AcousticModel): - def adaptation_config(self): - train, align = load_adapt_config() - return train - - -def adapt_model(args, unknown_args=None): - command = 'align' - all_begin = time.time() - if not args.temp_directory: - temp_dir = TEMP_DIR - else: - temp_dir = os.path.expanduser(args.temp_directory) - corpus_name = os.path.basename(args.corpus_directory) - if corpus_name == '': - args.corpus_directory = os.path.dirname(args.corpus_directory) - corpus_name = os.path.basename(args.corpus_directory) - data_directory = os.path.join(temp_dir, corpus_name) - if args.config_path: - align_config = align_yaml_to_config(args.config_path) - else: - align_config = load_basic_align() - align_config.use_mp = not args.disable_mp - align_config.debug = args.debug - align_config.overwrite = args.overwrite - align_config.cleanup_textgrids = not args.disable_textgrid_cleanup - - if unknown_args: - align_config.update_from_args(unknown_args) - conf_path = os.path.join(data_directory, 'config.yml') - if getattr(args, 'clean', False) and os.path.exists(data_directory): - print('Cleaning old directory!') - 
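        # Note that --clean deletes the whole per-corpus temp directory,
        # including the config.yml that load_command_configuration() reads a
        # few lines below, so a cleaned run starts from a fresh run
        # configuration; without it, the staleness checks that follow only
        # log warnings and the old temporary files are reused.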
shutil.rmtree(data_directory, ignore_errors=True) - if getattr(args, 'verbose', False): - log_level = 'debug' - else: - log_level = 'info' - logger = setup_logger(command, data_directory, console_level=log_level) - logger.debug('ALIGN CONFIG:') - log_config(logger, align_config) - conf = load_command_configuration(conf_path, {'dirty': False, - 'begin': all_begin, - 'version': __version__, - 'type': command, - 'corpus_directory': args.corpus_directory, - 'dictionary_path': args.dictionary_path, - 'acoustic_model_path': args.acoustic_model_path}) - if conf['dirty'] or conf['type'] != command \ - or conf['corpus_directory'] != args.corpus_directory \ - or conf['version'] != __version__ \ - or conf['dictionary_path'] != args.dictionary_path: - logger.warning( - 'WARNING: Using old temp directory, this might not be ideal for you, use the --clean flag to ensure no ' - 'weird behavior for previous versions of the temporary directory.') - if conf['dirty']: - logger.debug('Previous run ended in an error (maybe ctrl-c?)') - if conf['type'] != command: - logger.debug('Previous run was a different subcommand than {} (was {})'.format(command, conf['type'])) - if conf['corpus_directory'] != args.corpus_directory: - logger.debug('Previous run used source directory ' - 'path {} (new run: {})'.format(conf['corpus_directory'], args.corpus_directory)) - if conf['version'] != __version__: - logger.debug('Previous run was on {} version (new run: {})'.format(conf['version'], __version__)) - if conf['dictionary_path'] != args.dictionary_path: - logger.debug('Previous run used dictionary path {} ' - '(new run: {})'.format(conf['dictionary_path'], args.dictionary_path)) - if conf['acoustic_model_path'] != args.acoustic_model_path: - logger.debug('Previous run used acoustic model path {} ' - '(new run: {})'.format(conf['acoustic_model_path'], args.acoustic_model_path)) - - os.makedirs(data_directory, exist_ok=True) - model_directory = os.path.join(data_directory, 'acoustic_models') - os.makedirs(model_directory, exist_ok=True) - acoustic_model = AcousticModel2(args.acoustic_model_path, root_directory=model_directory) - print("| acoustic_model.meta", acoustic_model.meta) - acoustic_model.log_details(logger) - training_config = acoustic_model.adaptation_config() - training_config.training_configs[0].update({'beam': align_config.beam, 'retry_beam': align_config.retry_beam}) - training_config.update_from_align(align_config) - logger.debug('ADAPT TRAINING CONFIG:') - log_config(logger, training_config) - audio_dir = None - if args.audio_directory: - audio_dir = args.audio_directory - try: - corpus = AlignableCorpus(args.corpus_directory, data_directory, - speaker_characters=args.speaker_characters, - num_jobs=args.num_jobs, sample_rate=align_config.feature_config.sample_frequency, - logger=logger, use_mp=align_config.use_mp, punctuation=align_config.punctuation, - clitic_markers=align_config.clitic_markers, audio_directory=audio_dir) - if corpus.issues_check: - logger.warning('Some issues parsing the corpus were detected. 
' - 'Please run the validator to get more information.') - logger.info(corpus.speaker_utterance_info()) - if args.dictionary_path.lower().endswith('.yaml'): - dictionary = MultispeakerDictionary(args.dictionary_path, data_directory, logger=logger, - punctuation=align_config.punctuation, - clitic_markers=align_config.clitic_markers, - compound_markers=align_config.compound_markers, - multilingual_ipa=acoustic_model.meta['multilingual_ipa'], - strip_diacritics=acoustic_model.meta.get('strip_diacritics', None), - digraphs=acoustic_model.meta.get('digraphs', None)) - else: - dictionary = Dictionary(args.dictionary_path, data_directory, logger=logger, - punctuation=align_config.punctuation, - clitic_markers=align_config.clitic_markers, - compound_markers=align_config.compound_markers, - multilingual_ipa=acoustic_model.meta['multilingual_ipa'], - strip_diacritics=acoustic_model.meta.get('strip_diacritics', None), - digraphs=acoustic_model.meta.get('digraphs', None)) - acoustic_model.validate(dictionary) - - begin = time.time() - previous = PretrainedAligner(corpus, dictionary, acoustic_model, align_config, - temp_directory=data_directory, - debug=getattr(args, 'debug', False), logger=logger) - a = TrainableAligner(corpus, dictionary, training_config, align_config, - temp_directory=data_directory, - debug=getattr(args, 'debug', False), logger=logger, pretrained_aligner=previous) - logger.debug('Setup adapter in {} seconds'.format(time.time() - begin)) - a.verbose = args.verbose - - begin = time.time() - a.train() - logger.debug('Performed adaptation in {} seconds'.format(time.time() - begin)) - - begin = time.time() - a.save(args.output_model_path, root_directory=model_directory) - a.export_textgrids(args.output_directory) - logger.debug('Exported TextGrids in {} seconds'.format(time.time() - begin)) - logger.info('All done!') - - except Exception as _: - conf['dirty'] = True - raise - finally: - handlers = logger.handlers[:] - for handler in handlers: - handler.close() - logger.removeHandler(handler) - conf.save(conf_path) - - -def validate_args(args, downloaded_acoustic_models, download_dictionaries): - if not os.path.exists(args.corpus_directory): - raise ArgumentError('Could not find the corpus directory {}.'.format(args.corpus_directory)) - if not os.path.isdir(args.corpus_directory): - raise ArgumentError('The specified corpus directory ({}) is not a directory.'.format(args.corpus_directory)) - - args.dictionary_path = validate_dictionary_arg(args.dictionary_path, download_dictionaries) - - if args.acoustic_model_path.lower() in downloaded_acoustic_models: - args.acoustic_model_path = get_pretrained_acoustic_path(args.acoustic_model_path.lower()) - elif args.acoustic_model_path.lower().endswith(AcousticModel.extension): - if not os.path.exists(args.acoustic_model_path): - raise ArgumentError('The specified model path does not exist: ' + args.acoustic_model_path) - else: - raise ArgumentError( - 'The language \'{}\' is not currently included in the distribution, ' - 'please align via training or specify one of the following language names: {}.'.format( - args.acoustic_model_path.lower(), ', '.join(downloaded_acoustic_models))) - - -def run_adapt_model(args, unknown_args=None, downloaded_acoustic_models=None, download_dictionaries=None): - if downloaded_acoustic_models is None: - downloaded_acoustic_models = get_available_acoustic_languages() - if download_dictionaries is None: - download_dictionaries = get_available_dict_languages() - try: - args.speaker_characters = 
int(args.speaker_characters) - except ValueError: - pass - args.corpus_directory = args.corpus_directory.rstrip('/').rstrip('\\') - - validate_args(args, downloaded_acoustic_models, download_dictionaries) - adapt_model(args, unknown_args) diff --git a/spaces/Najaf-Zawar/Image-Super-Resolution/README.md b/spaces/Najaf-Zawar/Image-Super-Resolution/README.md deleted file mode 100644 index d2bc833060e109fe230b4c601495338f4ab92096..0000000000000000000000000000000000000000 --- a/spaces/Najaf-Zawar/Image-Super-Resolution/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Super Resolution -emoji: 🏆 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: artistic-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Najaf-Zawar/Old_Image-Restoration/app.py b/spaces/Najaf-Zawar/Old_Image-Restoration/app.py deleted file mode 100644 index 4619555a48e6d18e0e25144950b25ebdcfe8d4f5..0000000000000000000000000000000000000000 --- a/spaces/Najaf-Zawar/Old_Image-Restoration/app.py +++ /dev/null @@ -1,37 +0,0 @@ -import cv2 -import numpy -import os -import torch -import random -from basicsr.archs.rrdbnet_arch import RRDBNet -from realesrgan import RealESRGANer - -import gradio as gr -from skimage.restoration import inpaint as p -import torchvision.transforms as transforms - - -def restore_image(input_image): - img = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY) - threshold_value = 200 - _, mask = cv2.threshold(img, threshold_value, 255, cv2.THRESH_BINARY) - channels = cv2.split(img) - inpaint_channels = [] - for channel in channels: - inpaint_result = p.inpaint_biharmonic(channel, mask) - inpaint_channels.append(inpaint_result) - - result_img = cv2.merge(inpaint_channels) - filename = "output.jpg" - cv2.imwrite(filename, result_img) - return filename - - -# Define the Gradio app interface -inputs = gr.Image(label="Upload Image") -outputs = gr.Image(label="Restored_Image.") -title = "Image Restoration Using Pix2Pix-GAN" -description = "Restore the Quality of your Old damaged Images To New Looking Images Using Artificial Intelligence" - -iface = gr.Interface(fn=restore_image, inputs=inputs, outputs=outputs, title=title, description=description, allow_flagging="never") -iface.launch(inline = False) \ No newline at end of file diff --git a/spaces/Neo-Salvatore/translate-locale/README.md b/spaces/Neo-Salvatore/translate-locale/README.md deleted file mode 100644 index 30cfd33ad839b639366caab1d4a4132fdc4548be..0000000000000000000000000000000000000000 --- a/spaces/Neo-Salvatore/translate-locale/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Translate Locale -emoji: 🐨 -colorFrom: green -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Nickhilearla135095/maximum_diffusion/README.md b/spaces/Nickhilearla135095/maximum_diffusion/README.md deleted file mode 100644 index 661ae5617283e3a44b31c8cffea5a772f74b3ad0..0000000000000000000000000000000000000000 --- a/spaces/Nickhilearla135095/maximum_diffusion/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Maximum Diffusion -emoji: 🛕 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: true -duplicated_from: Omnibus/maximum_diffusion ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference 
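A note on restore_image() in the Old_Image-Restoration app above: the function
converts the input to grayscale *before* splitting, so cv2.split() yields a
single plane and the saved result loses all colour. Below is a hedged sketch
of what the per-channel loop presumably intended (the 200 threshold for
"damaged" pixels is kept from the original; the name restore_image_color and
the rest of the structure are illustrative, not part of the deleted file):

import cv2
import numpy as np
from skimage.restoration import inpaint_biharmonic

def restore_image_color(input_image: np.ndarray) -> np.ndarray:
    # Build the damage mask on a grayscale copy, but inpaint the colour planes.
    gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
    mask = mask.astype(bool)
    channels = []
    for channel in cv2.split(input_image):  # B, G, R planes of the original
        restored = inpaint_biharmonic(channel, mask)  # float result in [0, 1]
        channels.append((restored * 255).astype(np.uint8))
    return cv2.merge(channels)

On scikit-image >= 0.19 the loop collapses to a single call:
inpaint_biharmonic(input_image, mask, channel_axis=-1).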
diff --git a/spaces/NoriZC/vits-models/transforms.py b/spaces/NoriZC/vits-models/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/NoriZC/vits-models/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too 
large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/Nultx/VITS-TTS/monotonic_align/__init__.py b/spaces/Nultx/VITS-TTS/monotonic_align/__init__.py deleted file mode 100644 index 3d7009c40fea3a98168e3e3bc9ae061e91327422..0000000000000000000000000000000000000000 --- 
a/spaces/Nultx/VITS-TTS/monotonic_align/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -import numpy as np -import torch -from .monotonic_align.core import maximum_path_c - - -def maximum_path(neg_cent, mask): - """ Cython optimized version. - neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) - path = np.zeros(neg_cent.shape, dtype=np.int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) - maximum_path_c(path, neg_cent, t_t_max, t_s_max) - return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/benchmark/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/benchmark/__init__.py deleted file mode 100644 index 0317d5c623778fe40b7bf07b77769cd10c243244..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/benchmark/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -# import models/tasks to register them -from . import dummy_dataset, dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode_word.sh b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode_word.sh deleted file mode 100644 index c10a6b8809b77bca2b2c02df8b8702725bdd51c7..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode_word.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -split="dev_other" -ref_txt="" # ground truth transcript path -psd_txt="" # pseudo transcript path -get_best_wer=true -dec_name="decode" -graph_name="graph" -kenlm_path=/checkpoint/abaevski/data/speech/libri/librispeech_lm_novox.phnc_o6.bin -phonemize_lexicon="" - -. ./cmd.sh -. ./path.sh -. parse_options.sh -. /private/home/wnhsu/unsup_asr/fairseq-py-unsup/env.sh - -exp_root=$1 - -set -eu - -if [ ! -z $ref_txt ] && $get_best_wer; then - echo "==== WER w.r.t. 
real transcript (select based on unsupervised metric)" - for x in $exp_root/*/${dec_name}_${split}*; do - lang=$(dirname $x)/$graph_name - - for tra in $x/scoring/*.tra; do - cat $tra | utils/int2sym.pl -f 2- $lang/words.txt | sed 's:\::g' > $tra.txt - python local/unsup_select.py $psd_txt $tra.txt \ - --kenlm_path $kenlm_path --gt_tra $ref_txt --phonemize \ - --phonemize_lexicon "$phonemize_lexicon" - done | grep "score=" | sed 's/=/ /g' | sed 's/;//g' | sort -k3n | head -n1 - done -fi - - diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/scripts/prep_data.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/scripts/prep_data.py deleted file mode 100644 index 7aa7d37edc2c3e4c1d293911b753abf2ef597a7e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/scripts/prep_data.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python - -import argparse -from multiprocessing import Pool -from pathlib import Path - -import sacrebleu -import sentencepiece as spm - - -def read_text_file(filename): - with open(filename, "r") as f: - output = [line.strip() for line in f] - - return output - - -def get_bleu(in_sent, target_sent): - bleu = sacrebleu.corpus_bleu([in_sent], [[target_sent]]) - out = " ".join( - map(str, [bleu.score, bleu.sys_len, bleu.ref_len] + bleu.counts + bleu.totals) - ) - return out - - -def get_ter(in_sent, target_sent): - ter = sacrebleu.corpus_ter([in_sent], [[target_sent]]) - out = " ".join(map(str, [ter.score, ter.num_edits, ter.ref_length])) - return out - - -def init(sp_model): - global sp - sp = spm.SentencePieceProcessor() - sp.Load(sp_model) - - -def process(source_sent, target_sent, hypo_sent, metric): - source_bpe = " ".join(sp.EncodeAsPieces(source_sent)) - hypo_bpe = [" ".join(sp.EncodeAsPieces(h)) for h in hypo_sent] - - if metric == "bleu": - score_str = [get_bleu(h, target_sent) for h in hypo_sent] - else: # ter - score_str = [get_ter(h, target_sent) for h in hypo_sent] - - return source_bpe, hypo_bpe, score_str - - -def main(args): - assert ( - args.split.startswith("train") or args.num_shards == 1 - ), "--num-shards should be set to 1 for valid and test sets" - assert ( - args.split.startswith("train") - or args.split.startswith("valid") - or args.split.startswith("test") - ), "--split should be set to train[n]/valid[n]/test[n]" - - source_sents = read_text_file(args.input_source) - target_sents = read_text_file(args.input_target) - - num_sents = len(source_sents) - assert num_sents == len( - target_sents - ), f"{args.input_source} and {args.input_target} should have the same number of sentences." - - hypo_sents = read_text_file(args.input_hypo) - assert ( - len(hypo_sents) % args.beam == 0 - ), f"Number of hypotheses ({len(hypo_sents)}) cannot be divided by beam size ({args.beam})." - - hypo_sents = [ - hypo_sents[i : i + args.beam] for i in range(0, len(hypo_sents), args.beam) - ] - assert num_sents == len( - hypo_sents - ), f"{args.input_hypo} should contain {num_sents * args.beam} hypotheses but only has {len(hypo_sents) * args.beam}. 
(--beam={args.beam})" - - output_dir = args.output_dir / args.metric - for ns in range(args.num_shards): - print(f"processing shard {ns+1}/{args.num_shards}") - shard_output_dir = output_dir / f"split{ns+1}" - source_output_dir = shard_output_dir / "input_src" - hypo_output_dir = shard_output_dir / "input_tgt" - metric_output_dir = shard_output_dir / args.metric - - source_output_dir.mkdir(parents=True, exist_ok=True) - hypo_output_dir.mkdir(parents=True, exist_ok=True) - metric_output_dir.mkdir(parents=True, exist_ok=True) - - if args.n_proc > 1: - with Pool( - args.n_proc, initializer=init, initargs=(args.sentencepiece_model,) - ) as p: - output = p.starmap( - process, - [ - (source_sents[i], target_sents[i], hypo_sents[i], args.metric) - for i in range(ns, num_sents, args.num_shards) - ], - ) - else: - init(args.sentencepiece_model) - output = [ - process(source_sents[i], target_sents[i], hypo_sents[i], args.metric) - for i in range(ns, num_sents, args.num_shards) - ] - - with open(source_output_dir / f"{args.split}.bpe", "w") as s_o, open( - hypo_output_dir / f"{args.split}.bpe", "w" - ) as h_o, open(metric_output_dir / f"{args.split}.{args.metric}", "w") as m_o: - for source_bpe, hypo_bpe, score_str in output: - assert len(hypo_bpe) == len(score_str) - for h, m in zip(hypo_bpe, score_str): - s_o.write(f"{source_bpe}\n") - h_o.write(f"{h}\n") - m_o.write(f"{m}\n") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--input-source", type=Path, required=True) - parser.add_argument("--input-target", type=Path, required=True) - parser.add_argument("--input-hypo", type=Path, required=True) - parser.add_argument("--output-dir", type=Path, required=True) - parser.add_argument("--split", type=str, required=True) - parser.add_argument("--beam", type=int, required=True) - parser.add_argument("--sentencepiece-model", type=str, required=True) - parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu") - parser.add_argument("--num-shards", type=int, default=1) - parser.add_argument("--n-proc", type=int, default=8) - - args = parser.parse_args() - - main(args) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/w2l_decoder.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/w2l_decoder.py deleted file mode 100644 index fbf2d3524ee40bd0d08b6a9560047d96e49b6045..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/w2l_decoder.py +++ /dev/null @@ -1,486 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -""" -Flashlight decoders. 
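These wrappers expose a common interface (generate -> get_emissions -> decode)
over flashlight's plain Viterbi decoder, a KenLM lexicon beam-search decoder,
and a lexicon decoder driven by a fairseq neural language model.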
-""" - -import gc -import itertools as it -import os.path as osp -from typing import List -import warnings -from collections import deque, namedtuple - -import numpy as np -import torch -from examples.speech_recognition.data.replabels import unpack_replabels -from fairseq import tasks -from fairseq.utils import apply_to_sample -from omegaconf import open_dict -from fairseq.dataclass.utils import convert_namespace_to_omegaconf - - -try: - from flashlight.lib.text.dictionary import create_word_dict, load_words - from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes - from flashlight.lib.text.decoder import ( - CriterionType, - LexiconDecoderOptions, - KenLM, - LM, - LMState, - SmearingMode, - Trie, - LexiconDecoder, - ) -except: - warnings.warn( - "flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python" - ) - LM = object - LMState = object - - -class W2lDecoder(object): - def __init__(self, args, tgt_dict): - self.tgt_dict = tgt_dict - self.vocab_size = len(tgt_dict) - self.nbest = args.nbest - - # criterion-specific init - self.criterion_type = CriterionType.CTC - self.blank = ( - tgt_dict.index("") - if "" in tgt_dict.indices - else tgt_dict.bos() - ) - if "" in tgt_dict.indices: - self.silence = tgt_dict.index("") - elif "|" in tgt_dict.indices: - self.silence = tgt_dict.index("|") - else: - self.silence = tgt_dict.eos() - self.asg_transitions = None - - def generate(self, models, sample, **unused): - """Generate a batch of inferences.""" - # model.forward normally channels prev_output_tokens into the decoder - # separately, but SequenceGenerator directly calls model.encoder - encoder_input = { - k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" - } - emissions = self.get_emissions(models, encoder_input) - return self.decode(emissions) - - def get_emissions(self, models, encoder_input): - """Run encoder and normalize emissions""" - model = models[0] - encoder_out = model(**encoder_input) - if hasattr(model, "get_logits"): - emissions = model.get_logits(encoder_out) # no need to normalize emissions - else: - emissions = model.get_normalized_probs(encoder_out, log_probs=True) - return emissions.transpose(0, 1).float().cpu().contiguous() - - def get_tokens(self, idxs): - """Normalize tokens by handling CTC blank, ASG replabels, etc.""" - idxs = (g[0] for g in it.groupby(idxs)) - idxs = filter(lambda x: x != self.blank, idxs) - return torch.LongTensor(list(idxs)) - - -class W2lViterbiDecoder(W2lDecoder): - def __init__(self, args, tgt_dict): - super().__init__(args, tgt_dict) - - def decode(self, emissions): - B, T, N = emissions.size() - hypos = [] - if self.asg_transitions is None: - transitions = torch.FloatTensor(N, N).zero_() - else: - transitions = torch.FloatTensor(self.asg_transitions).view(N, N) - viterbi_path = torch.IntTensor(B, T) - workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N)) - CpuViterbiPath.compute( - B, - T, - N, - get_data_ptr_as_bytes(emissions), - get_data_ptr_as_bytes(transitions), - get_data_ptr_as_bytes(viterbi_path), - get_data_ptr_as_bytes(workspace), - ) - return [ - [{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}] - for b in range(B) - ] - - -class W2lKenLMDecoder(W2lDecoder): - def __init__(self, args, tgt_dict): - super().__init__(args, tgt_dict) - - self.unit_lm = getattr(args, "unit_lm", False) - - if args.lexicon: - self.lexicon = load_words(args.lexicon) - 
self.word_dict = create_word_dict(self.lexicon) - self.unk_word = self.word_dict.get_index("") - - self.lm = KenLM(args.kenlm_model, self.word_dict) - self.trie = Trie(self.vocab_size, self.silence) - - start_state = self.lm.start(False) - for i, (word, spellings) in enumerate(self.lexicon.items()): - word_idx = self.word_dict.get_index(word) - _, score = self.lm.score(start_state, word_idx) - for spelling in spellings: - spelling_idxs = [tgt_dict.index(token) for token in spelling] - assert ( - tgt_dict.unk() not in spelling_idxs - ), f"{spelling} {spelling_idxs}" - self.trie.insert(spelling_idxs, word_idx, score) - self.trie.smear(SmearingMode.MAX) - - self.decoder_opts = LexiconDecoderOptions( - beam_size=args.beam, - beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), - beam_threshold=args.beam_threshold, - lm_weight=args.lm_weight, - word_score=args.word_score, - unk_score=args.unk_weight, - sil_score=args.sil_weight, - log_add=False, - criterion_type=self.criterion_type, - ) - - if self.asg_transitions is None: - N = 768 - # self.asg_transitions = torch.FloatTensor(N, N).zero_() - self.asg_transitions = [] - - self.decoder = LexiconDecoder( - self.decoder_opts, - self.trie, - self.lm, - self.silence, - self.blank, - self.unk_word, - self.asg_transitions, - self.unit_lm, - ) - else: - assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" - from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions - - d = {w: [[w]] for w in tgt_dict.symbols} - self.word_dict = create_word_dict(d) - self.lm = KenLM(args.kenlm_model, self.word_dict) - self.decoder_opts = LexiconFreeDecoderOptions( - beam_size=args.beam, - beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), - beam_threshold=args.beam_threshold, - lm_weight=args.lm_weight, - sil_score=args.sil_weight, - log_add=False, - criterion_type=self.criterion_type, - ) - self.decoder = LexiconFreeDecoder( - self.decoder_opts, self.lm, self.silence, self.blank, [] - ) - - def get_timesteps(self, token_idxs: List[int]) -> List[int]: - """Returns frame numbers corresponding to every non-blank token. - - Parameters - ---------- - token_idxs : List[int] - IDs of decoded tokens. - - Returns - ------- - List[int] - Frame numbers corresponding to every non-blank token. 
- """ - timesteps = [] - for i, token_idx in enumerate(token_idxs): - if token_idx == self.blank: - continue - if i == 0 or token_idx != token_idxs[i-1]: - timesteps.append(i) - return timesteps - - def decode(self, emissions): - B, T, N = emissions.size() - hypos = [] - for b in range(B): - emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) - results = self.decoder.decode(emissions_ptr, T, N) - - nbest_results = results[: self.nbest] - hypos.append( - [ - { - "tokens": self.get_tokens(result.tokens), - "score": result.score, - "timesteps": self.get_timesteps(result.tokens), - "words": [ - self.word_dict.get_entry(x) for x in result.words if x >= 0 - ], - } - for result in nbest_results - ] - ) - return hypos - - -FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"]) - - -class FairseqLM(LM): - def __init__(self, dictionary, model): - LM.__init__(self) - self.dictionary = dictionary - self.model = model - self.unk = self.dictionary.unk() - - self.save_incremental = False # this currently does not work properly - self.max_cache = 20_000 - - model.cuda() - model.eval() - model.make_generation_fast_() - - self.states = {} - self.stateq = deque() - - def start(self, start_with_nothing): - state = LMState() - prefix = torch.LongTensor([[self.dictionary.eos()]]) - incremental_state = {} if self.save_incremental else None - with torch.no_grad(): - res = self.model(prefix.cuda(), incremental_state=incremental_state) - probs = self.model.get_normalized_probs(res, log_probs=True, sample=None) - - if incremental_state is not None: - incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state) - self.states[state] = FairseqLMState( - prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy() - ) - self.stateq.append(state) - - return state - - def score(self, state: LMState, token_index: int, no_cache: bool = False): - """ - Evaluate language model based on the current lm state and new word - Parameters: - ----------- - state: current lm state - token_index: index of the word - (can be lexicon index then you should store inside LM the - mapping between indices of lexicon and lm, or lm index of a word) - - Returns: - -------- - (LMState, float): pair of (new state, score for the current word) - """ - curr_state = self.states[state] - - def trim_cache(targ_size): - while len(self.stateq) > targ_size: - rem_k = self.stateq.popleft() - rem_st = self.states[rem_k] - rem_st = FairseqLMState(rem_st.prefix, None, None) - self.states[rem_k] = rem_st - - if curr_state.probs is None: - new_incremental_state = ( - curr_state.incremental_state.copy() - if curr_state.incremental_state is not None - else None - ) - with torch.no_grad(): - if new_incremental_state is not None: - new_incremental_state = apply_to_sample( - lambda x: x.cuda(), new_incremental_state - ) - elif self.save_incremental: - new_incremental_state = {} - - res = self.model( - torch.from_numpy(curr_state.prefix).cuda(), - incremental_state=new_incremental_state, - ) - probs = self.model.get_normalized_probs( - res, log_probs=True, sample=None - ) - - if new_incremental_state is not None: - new_incremental_state = apply_to_sample( - lambda x: x.cpu(), new_incremental_state - ) - - curr_state = FairseqLMState( - curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy() - ) - - if not no_cache: - self.states[state] = curr_state - self.stateq.append(state) - - score = curr_state.probs[token_index].item() - - trim_cache(self.max_cache) - - outstate = state.child(token_index) 
- if outstate not in self.states and not no_cache: - prefix = np.concatenate( - [curr_state.prefix, torch.LongTensor([[token_index]])], -1 - ) - incr_state = curr_state.incremental_state - - self.states[outstate] = FairseqLMState(prefix, incr_state, None) - - if token_index == self.unk: - score = float("-inf") - - return outstate, score - - def finish(self, state: LMState): - """ - Evaluate eos for language model based on the current lm state - - Returns: - -------- - (LMState, float): pair of (new state, score for the current word) - """ - return self.score(state, self.dictionary.eos()) - - def empty_cache(self): - self.states = {} - self.stateq = deque() - gc.collect() - - -class W2lFairseqLMDecoder(W2lDecoder): - def __init__(self, args, tgt_dict): - super().__init__(args, tgt_dict) - - self.unit_lm = getattr(args, "unit_lm", False) - - self.lexicon = load_words(args.lexicon) if args.lexicon else None - self.idx_to_wrd = {} - - checkpoint = torch.load(args.kenlm_model, map_location="cpu") - - if "cfg" in checkpoint and checkpoint["cfg"] is not None: - lm_args = checkpoint["cfg"] - else: - lm_args = convert_namespace_to_omegaconf(checkpoint["args"]) - - with open_dict(lm_args.task): - lm_args.task.data = osp.dirname(args.kenlm_model) - - task = tasks.setup_task(lm_args.task) - model = task.build_model(lm_args.model) - model.load_state_dict(checkpoint["model"], strict=False) - - self.trie = Trie(self.vocab_size, self.silence) - - self.word_dict = task.dictionary - self.unk_word = self.word_dict.unk() - self.lm = FairseqLM(self.word_dict, model) - - if self.lexicon: - start_state = self.lm.start(False) - for i, (word, spellings) in enumerate(self.lexicon.items()): - if self.unit_lm: - word_idx = i - self.idx_to_wrd[i] = word - score = 0 - else: - word_idx = self.word_dict.index(word) - _, score = self.lm.score(start_state, word_idx, no_cache=True) - - for spelling in spellings: - spelling_idxs = [tgt_dict.index(token) for token in spelling] - assert ( - tgt_dict.unk() not in spelling_idxs - ), f"{spelling} {spelling_idxs}" - self.trie.insert(spelling_idxs, word_idx, score) - self.trie.smear(SmearingMode.MAX) - - self.decoder_opts = LexiconDecoderOptions( - beam_size=args.beam, - beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), - beam_threshold=args.beam_threshold, - lm_weight=args.lm_weight, - word_score=args.word_score, - unk_score=args.unk_weight, - sil_score=args.sil_weight, - log_add=False, - criterion_type=self.criterion_type, - ) - - self.decoder = LexiconDecoder( - self.decoder_opts, - self.trie, - self.lm, - self.silence, - self.blank, - self.unk_word, - [], - self.unit_lm, - ) - else: - assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" - from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions - - d = {w: [[w]] for w in tgt_dict.symbols} - self.word_dict = create_word_dict(d) - self.lm = KenLM(args.kenlm_model, self.word_dict) - self.decoder_opts = LexiconFreeDecoderOptions( - beam_size=args.beam, - beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), - beam_threshold=args.beam_threshold, - lm_weight=args.lm_weight, - sil_score=args.sil_weight, - log_add=False, - criterion_type=self.criterion_type, - ) - self.decoder = LexiconFreeDecoder( - self.decoder_opts, self.lm, self.silence, self.blank, [] - ) - - def decode(self, emissions): - B, T, N = emissions.size() - hypos = [] - - def idx_to_word(idx): - if self.unit_lm: - return self.idx_to_wrd[idx] - else: - return 
self.word_dict[idx] - - def make_hypo(result): - hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score} - if self.lexicon: - hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0] - return hypo - - for b in range(B): - emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) - results = self.decoder.decode(emissions_ptr, T, N) - - nbest_results = results[: self.nbest] - hypos.append([make_hypo(result) for result in nbest_results]) - self.lm.empty_cache() - - return hypos diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py deleted file mode 100644 index c2bd16efb530af5af3f72ab0edb3044b4e9fcd5c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import fasttext as ft -import os -import regex -import sys - - -def get_parser(): - parser = argparse.ArgumentParser( - description="reads text from stdin and outputs normalized, lid-filtered version to stdout" - ) - parser.add_argument( - "--fasttext-model", - help="path to fasttext model", - default="lid.187.bin", - ) - parser.add_argument("--lang", help="language id", required=True) - parser.add_argument( - "--lid-threshold", - type=float, - help="threshold for this lang id probability", - default=0.4, - ) - - return parser - - -def main(): - parser = get_parser() - args = parser.parse_args() - filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]") - - lg = args.lang.lower() - lg_label = f"__label__{lg}" - thresh = args.lid_threshold - - if os.path.exists(args.fasttext_model): - model = ft.load_model(args.fasttext_model) - else: - print( - f"fasttext language id model {args.fasttext_model} not found. Proceeding without language filtering. " - f"To enable language filtering, please download the latest language id model " - f"from https://fasttext.cc/docs/en/language-identification.html", - file=sys.stderr, - ) - model = None - - for line in sys.stdin: - line = line.strip() - line = filter_r.sub(" ", line) - line = " ".join(line.split()) - - if model is not None: - lid, prob = model.predict(line, k=100) - try: - target_idx = lid.index(lg_label) - except ValueError: - continue - if target_idx == 0 or prob[target_idx] >= thresh: - print(line) - else: - print(line) - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/wav2vec_manifest.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/wav2vec_manifest.py deleted file mode 100644 index 9b8aa180e88d9ee98bdca7089aed5046ec0d9cb9..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/wav2vec_manifest.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Data pre-processing: build vocabularies and binarize training data. 
-""" - -import argparse -import glob -import os -import random - -import soundfile - - -def get_parser(): - parser = argparse.ArgumentParser() - parser.add_argument( - "root", metavar="DIR", help="root directory containing flac files to index" - ) - parser.add_argument( - "--valid-percent", - default=0.01, - type=float, - metavar="D", - help="percentage of data to use as validation set (between 0 and 1)", - ) - parser.add_argument( - "--dest", default=".", type=str, metavar="DIR", help="output directory" - ) - parser.add_argument( - "--ext", default="flac", type=str, metavar="EXT", help="extension to look for" - ) - parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed") - parser.add_argument( - "--path-must-contain", - default=None, - type=str, - metavar="FRAG", - help="if set, path must contain this substring for a file to be included in the manifest", - ) - return parser - - -def main(args): - assert args.valid_percent >= 0 and args.valid_percent <= 1.0 - - if not os.path.exists(args.dest): - os.makedirs(args.dest) - - dir_path = os.path.realpath(args.root) - search_path = os.path.join(dir_path, "**/*." + args.ext) - rand = random.Random(args.seed) - - valid_f = ( - open(os.path.join(args.dest, "valid.tsv"), "w") - if args.valid_percent > 0 - else None - ) - - with open(os.path.join(args.dest, "train.tsv"), "w") as train_f: - print(dir_path, file=train_f) - - if valid_f is not None: - print(dir_path, file=valid_f) - - for fname in glob.iglob(search_path, recursive=True): - file_path = os.path.realpath(fname) - - if args.path_must_contain and args.path_must_contain not in file_path: - continue - - frames = soundfile.info(fname).frames - dest = train_f if rand.random() > args.valid_percent else valid_f - print( - "{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest - ) - if valid_f is not None: - valid_f.close() - - -if __name__ == "__main__": - parser = get_parser() - args = parser.parse_args() - main(args) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/audio/feature_transforms/global_cmvn.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/audio/feature_transforms/global_cmvn.py deleted file mode 100644 index e457ff176fee3b996da11f47e7dc61b81c445ba3..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/audio/feature_transforms/global_cmvn.py +++ /dev/null @@ -1,29 +0,0 @@ -import numpy as np -from fairseq.data.audio.feature_transforms import ( - AudioFeatureTransform, - register_audio_feature_transform, -) - - -@register_audio_feature_transform("global_cmvn") -class GlobalCMVN(AudioFeatureTransform): - """Global CMVN (cepstral mean and variance normalization). 
The global mean - and variance need to be pre-computed and stored in NumPy format (.npz).""" - - @classmethod - def from_config_dict(cls, config=None): - _config = {} if config is None else config - return GlobalCMVN(_config.get("stats_npz_path")) - - def __init__(self, stats_npz_path): - self.stats_npz_path = stats_npz_path - stats = np.load(stats_npz_path) - self.mean, self.std = stats["mean"], stats["std"] - - def __repr__(self): - return self.__class__.__name__ + f'(stats_npz_path="{self.stats_npz_path}")' - - def __call__(self, x): - x = np.subtract(x, self.mean) - x = np.divide(x, self.std) - return x diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_token_block_dataset.py b/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_token_block_dataset.py deleted file mode 100644 index c4d7b76dcd55fe7869dbb1fa188f7b36fb639bda..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_token_block_dataset.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import unittest - -import tests.utils as test_utils -import torch -from fairseq.data import TokenBlockDataset - - -class TestTokenBlockDataset(unittest.TestCase): - def _build_dataset(self, data, **kwargs): - sizes = [len(x) for x in data] - underlying_ds = test_utils.TestDataset(data) - return TokenBlockDataset(underlying_ds, sizes, **kwargs) - - def test_eos_break_mode(self): - data = [ - torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), - torch.tensor([1], dtype=torch.long), - torch.tensor([8, 7, 6, 1], dtype=torch.long), - ] - ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos") - self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) - self.assertEqual(ds[1].tolist(), [1]) - self.assertEqual(ds[2].tolist(), [8, 7, 6, 1]) - - data = [ - torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), - torch.tensor([8, 7, 6, 1], dtype=torch.long), - torch.tensor([1], dtype=torch.long), - ] - ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos") - self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) - self.assertEqual(ds[1].tolist(), [8, 7, 6, 1]) - self.assertEqual(ds[2].tolist(), [1]) - - def test_block_break_mode(self): - data = [ - torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), - torch.tensor([8, 7, 6, 1], dtype=torch.long), - torch.tensor([9, 1], dtype=torch.long), - ] - ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none") - self.assertEqual(ds[0].tolist(), [5, 4, 3]) - self.assertEqual(ds[1].tolist(), [2, 1, 8]) - self.assertEqual(ds[2].tolist(), [7, 6, 1]) - self.assertEqual(ds[3].tolist(), [9, 1]) - - def test_complete_break_mode(self): - data = [ - torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), - torch.tensor([8, 7, 6, 1], dtype=torch.long), - torch.tensor([9, 1], dtype=torch.long), - ] - ds = self._build_dataset( - data, block_size=6, pad=0, eos=1, break_mode="complete" - ) - self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) - self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1]) - - data = [ - torch.tensor([4, 3, 2, 1], dtype=torch.long), - torch.tensor([5, 1], dtype=torch.long), - torch.tensor([1], dtype=torch.long), - torch.tensor([6, 1], dtype=torch.long), - ] - ds = self._build_dataset( - data, block_size=3, pad=0, eos=1, break_mode="complete" - ) - self.assertEqual(ds[0].tolist(), [4, 3, 2, 1]) - self.assertEqual(ds[1].tolist(), [5, 1, 1]) - self.assertEqual(ds[2].tolist(), [6, 
1]) - - def test_4billion_tokens(self): - """Regression test for numpy type promotion issue https://github.com/numpy/numpy/issues/5745""" - data = [torch.tensor(list(range(10000)), dtype=torch.long)] * 430000 - ds = self._build_dataset( - data, block_size=6, pad=0, eos=1, break_mode="complete" - ) - ds[-1] # __getitem__ works - start, end = ds.slice_indices[-1] - assert end > 4294967295 # data must be sufficiently large to overflow uint32 - assert not isinstance( - end + 1, float - ) # this would also raise, since np.uint64(1) + 1 => 2.0 - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/train.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/train.py deleted file mode 100644 index b6ed02bd59f540ca58df20bf72d462f195210a32..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/train.py +++ /dev/null @@ -1,18 +0,0 @@ -# Common training-related configs that are designed for "tools/lazyconfig_train_net.py" -# You can use your own instead, together with your own train_net.py -train = dict( - output_dir="./output", - init_checkpoint="", - max_iter=90000, - amp=dict(enabled=False), # options for Automatic Mixed Precision - ddp=dict( # options for DistributedDataParallel - broadcast_buffers=False, - find_unused_parameters=False, - fp16_compression=False, - ), - checkpointer=dict(period=5000, max_to_keep=100), # options for PeriodicCheckpointer - eval_period=5000, - log_period=20, - device="cuda" - # ... -) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/dev/run_inference_tests.sh b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/dev/run_inference_tests.sh deleted file mode 100644 index bc9dcc56f06f79fc5efa42c04ffdc07c2787e3ac..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/dev/run_inference_tests.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -e -# Copyright (c) Facebook, Inc. and its affiliates. - -BIN="python tools/train_net.py" -OUTPUT="inference_test_output" -NUM_GPUS=2 - -CFG_LIST=( "${@:1}" ) - -if [ ${#CFG_LIST[@]} -eq 0 ]; then - CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) -fi - -echo "========================================================================" -echo "Configs to run:" -echo "${CFG_LIST[@]}" -echo "========================================================================" - - -for cfg in "${CFG_LIST[@]}"; do - echo "========================================================================" - echo "Running $cfg ..." - echo "========================================================================" - $BIN \ - --eval-only \ - --num-gpus $NUM_GPUS \ - --config-file "$cfg" \ - OUTPUT_DIR $OUTPUT - rm -rf $OUTPUT -done - - -echo "========================================================================" -echo "Running demo.py ..." 
-echo "========================================================================" -DEMO_BIN="python demo/demo.py" -COCO_DIR=datasets/coco/val2014 -mkdir -pv $OUTPUT - -set -v - -$DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \ - --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT -rm -rf $OUTPUT diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/node.py b/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/node.py deleted file mode 100644 index 1f37f7856cc732a37dc58253022a7c331489493e..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/node.py +++ /dev/null @@ -1,263 +0,0 @@ -"""Nodes, conforming to the glTF 2.0 standards as specified in -https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-node - -Author: Matthew Matl -""" -import numpy as np - -import trimesh.transformations as transformations - -from .camera import Camera -from .mesh import Mesh -from .light import Light - - -class Node(object): - """A node in the node hierarchy. - - Parameters - ---------- - name : str, optional - The user-defined name of this object. - camera : :class:`Camera`, optional - The camera in this node. - children : list of :class:`Node` - The children of this node. - skin : int, optional - The index of the skin referenced by this node. - matrix : (4,4) float, optional - A floating-point 4x4 transformation matrix. - mesh : :class:`Mesh`, optional - The mesh in this node. - rotation : (4,) float, optional - The node's unit quaternion in the order (x, y, z, w), where - w is the scalar. - scale : (3,) float, optional - The node's non-uniform scale, given as the scaling factors along the x, - y, and z axes. - translation : (3,) float, optional - The node's translation along the x, y, and z axes. - weights : (n,) float - The weights of the instantiated Morph Target. Number of elements must - match number of Morph Targets of used mesh. - light : :class:`Light`, optional - The light in this node. - """ - - def __init__(self, - name=None, - camera=None, - children=None, - skin=None, - matrix=None, - mesh=None, - rotation=None, - scale=None, - translation=None, - weights=None, - light=None): - # Set defaults - if children is None: - children = [] - - self._matrix = None - self._scale = None - self._rotation = None - self._translation = None - if matrix is None: - if rotation is None: - rotation = np.array([0.0, 0.0, 0.0, 1.0]) - if translation is None: - translation = np.zeros(3) - if scale is None: - scale = np.ones(3) - self.rotation = rotation - self.translation = translation - self.scale = scale - else: - self.matrix = matrix - - self.name = name - self.camera = camera - self.children = children - self.skin = skin - self.mesh = mesh - self.weights = weights - self.light = light - - @property - def name(self): - """str : The user-defined name of this object. - """ - return self._name - - @name.setter - def name(self, value): - if value is not None: - value = str(value) - self._name = value - - @property - def camera(self): - """:class:`Camera` : The camera in this node. - """ - return self._camera - - @camera.setter - def camera(self, value): - if value is not None and not isinstance(value, Camera): - raise TypeError('Value must be a camera') - self._camera = value - - @property - def children(self): - """list of :class:`Node` : The children of this node. 
- """ - return self._children - - @children.setter - def children(self, value): - self._children = value - - @property - def skin(self): - """int : The skin index for this node. - """ - return self._skin - - @skin.setter - def skin(self, value): - self._skin = value - - @property - def mesh(self): - """:class:`Mesh` : The mesh in this node. - """ - return self._mesh - - @mesh.setter - def mesh(self, value): - if value is not None and not isinstance(value, Mesh): - raise TypeError('Value must be a mesh') - self._mesh = value - - @property - def light(self): - """:class:`Light` : The light in this node. - """ - return self._light - - @light.setter - def light(self, value): - if value is not None and not isinstance(value, Light): - raise TypeError('Value must be a light') - self._light = value - - @property - def rotation(self): - """(4,) float : The xyzw quaternion for this node. - """ - return self._rotation - - @rotation.setter - def rotation(self, value): - value = np.asanyarray(value) - if value.shape != (4,): - raise ValueError('Quaternion must be a (4,) vector') - if np.abs(np.linalg.norm(value) - 1.0) > 1e-3: - raise ValueError('Quaternion must have norm == 1.0') - self._rotation = value - self._matrix = None - - @property - def translation(self): - """(3,) float : The translation for this node. - """ - return self._translation - - @translation.setter - def translation(self, value): - value = np.asanyarray(value) - if value.shape != (3,): - raise ValueError('Translation must be a (3,) vector') - self._translation = value - self._matrix = None - - @property - def scale(self): - """(3,) float : The scale for this node. - """ - return self._scale - - @scale.setter - def scale(self, value): - value = np.asanyarray(value) - if value.shape != (3,): - raise ValueError('Scale must be a (3,) vector') - self._scale = value - self._matrix = None - - @property - def matrix(self): - """(4,4) float : The homogenous transform matrix for this node. - - Note that this matrix's elements are not settable, - it's just a copy of the internal matrix. You can set the whole - matrix, but not an individual element. 
- """ - if self._matrix is None: - self._matrix = self._m_from_tqs( - self.translation, self.rotation, self.scale - ) - return self._matrix.copy() - - @matrix.setter - def matrix(self, value): - value = np.asanyarray(value) - if value.shape != (4,4): - raise ValueError('Matrix must be a 4x4 numpy ndarray') - if not np.allclose(value[3,:], np.array([0.0, 0.0, 0.0, 1.0])): - raise ValueError('Bottom row of matrix must be [0,0,0,1]') - self.rotation = Node._q_from_m(value) - self.scale = Node._s_from_m(value) - self.translation = Node._t_from_m(value) - self._matrix = value - - @staticmethod - def _t_from_m(m): - return m[:3,3] - - @staticmethod - def _r_from_m(m): - U = m[:3,:3] - norms = np.linalg.norm(U.T, axis=1) - return U / norms - - @staticmethod - def _q_from_m(m): - M = np.eye(4) - M[:3,:3] = Node._r_from_m(m) - q_wxyz = transformations.quaternion_from_matrix(M) - return np.roll(q_wxyz, -1) - - @staticmethod - def _s_from_m(m): - return np.linalg.norm(m[:3,:3].T, axis=1) - - @staticmethod - def _r_from_q(q): - q_wxyz = np.roll(q, 1) - return transformations.quaternion_matrix(q_wxyz)[:3,:3] - - @staticmethod - def _m_from_tqs(t, q, s): - S = np.eye(4) - S[:3,:3] = np.diag(s) - - R = np.eye(4) - R[:3,:3] = Node._r_from_q(q) - - T = np.eye(4) - T[:3,3] = t - - return T.dot(R.dot(S)) diff --git a/spaces/OpenShape/openshape-demo/app.py b/spaces/OpenShape/openshape-demo/app.py deleted file mode 100644 index 60bbdb2ed4e785fb04cb85956a78df50e42a9b8d..0000000000000000000000000000000000000000 --- a/spaces/OpenShape/openshape-demo/app.py +++ /dev/null @@ -1,331 +0,0 @@ -import sys -import threading -import streamlit as st -from huggingface_hub import HfFolder, snapshot_download - - -@st.cache_data -def load_support(): - if st.secrets.has_key('etoken'): - HfFolder().save_token(st.secrets['etoken']) - sys.path.append(snapshot_download("OpenShape/openshape-demo-support")) - - -# st.set_page_config(layout='wide') -load_support() - - -import numpy -import torch -import openshape -import transformers -from PIL import Image - -@st.cache_resource -def load_openshape(name, to_cpu=False): - pce = openshape.load_pc_encoder(name) - if to_cpu: - pce = pce.cpu() - return pce - - -@st.cache_resource -def load_openclip(): - sys.clip_move_lock = threading.Lock() - clip_model, clip_prep = transformers.CLIPModel.from_pretrained( - "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", - low_cpu_mem_usage=True, torch_dtype=half, - offload_state_dict=True - ), transformers.CLIPProcessor.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") - if torch.cuda.is_available(): - with sys.clip_move_lock: - clip_model.cuda() - return clip_model, clip_prep - - -f32 = numpy.float32 -half = torch.float16 if torch.cuda.is_available() else torch.bfloat16 -# clip_model, clip_prep = None, None -clip_model, clip_prep = load_openclip() -model_b32 = load_openshape('openshape-pointbert-vitb32-rgb', True) -model_l14 = load_openshape('openshape-pointbert-vitl14-rgb') -model_g14 = load_openshape('openshape-pointbert-vitg14-rgb') -torch.set_grad_enabled(False) -for kc, vc in st.session_state.get('state_queue', []): - st.session_state[kc] = vc -st.session_state.state_queue = [] - - -import samples_index -from openshape.demo import misc_utils, classification, caption, sd_pc2img, retrieval - - -st.title("OpenShape Demo") -st.caption("For faster inference without waiting in queue, you may clone the space and run it yourself.") -prog = st.progress(0.0, "Idle") -tab_cls, tab_img, tab_text, tab_pc, tab_sd, tab_cap = st.tabs([ - "Classification", - 
"Retrieval w/ Image", - "Retrieval w/ Text", - "Retrieval w/ 3D", - "Image Generation", - "Captioning", -]) - - -def sq(kc, vc): - st.session_state.state_queue.append((kc, vc)) - - -def reset_3d_shape_input(key): - # this is not working due to streamlit problems, don't use it - model_key = key + "_model" - npy_key = key + "_npy" - swap_key = key + "_swap" - sq(model_key, None) - sq(npy_key, None) - sq(swap_key, "Y is up (for most Objaverse shapes)") - - -def auto_submit(key): - if st.session_state.get(key): - st.session_state[key] = False - return True - return False - - -def queue_auto_submit(key): - st.session_state[key] = True - st.experimental_rerun() - - -img_example_counter = 0 - - -def image_examples(samples, ncols, return_key=None, example_text="Examples"): - global img_example_counter - trigger = False - with st.expander(example_text, True): - for i in range(len(samples) // ncols): - cols = st.columns(ncols) - for j in range(ncols): - idx = i * ncols + j - if idx >= len(samples): - continue - entry = samples[idx] - with cols[j]: - st.image(entry['dispi']) - img_example_counter += 1 - with st.columns(5)[2]: - this_trigger = st.button('\+', key='imgexuse%d' % img_example_counter) - trigger = trigger or this_trigger - if this_trigger: - if return_key is None: - for k, v in entry.items(): - if not k.startswith('disp'): - sq(k, v) - else: - trigger = entry[return_key] - return trigger - - -def demo_classification(): - with st.form("clsform"): - load_data = misc_utils.input_3d_shape('cls') - cats = st.text_input("Custom Categories (64 max, separated with comma)") - cats = [a.strip() for a in cats.split(',')] - if len(cats) > 64: - st.error('Maximum 64 custom categories supported in the demo') - return - lvis_run = st.form_submit_button("Run Classification on LVIS Categories") - custom_run = st.form_submit_button("Run Classification on Custom Categories") - if lvis_run or auto_submit("clsauto"): - pc = load_data(prog) - col2 = misc_utils.render_pc(pc) - prog.progress(0.5, "Running Classification") - pred = classification.pred_lvis_sims(model_g14, pc) - with col2: - for i, (cat, sim) in zip(range(5), pred.items()): - st.text(cat) - st.caption("Similarity %.4f" % sim) - prog.progress(1.0, "Idle") - if custom_run: - pc = load_data(prog) - col2 = misc_utils.render_pc(pc) - prog.progress(0.5, "Computing Category Embeddings") - device = clip_model.device - tn = clip_prep(text=cats, return_tensors='pt', truncation=True, max_length=76, padding=True).to(device) - feats = clip_model.get_text_features(**tn).float().cpu() - prog.progress(0.5, "Running Classification") - pred = classification.pred_custom_sims(model_g14, pc, cats, feats) - with col2: - for i, (cat, sim) in zip(range(5), pred.items()): - st.text(cat) - st.caption("Similarity %.4f" % sim) - prog.progress(1.0, "Idle") - if image_examples(samples_index.classification, 3, example_text="Examples (Choose one of the following 3D shapes)"): - queue_auto_submit("clsauto") - - -def demo_captioning(): - with st.form("capform"): - load_data = misc_utils.input_3d_shape('cap') - cond_scale = st.slider('Conditioning Scale', 0.0, 4.0, 2.0, 0.1, key='capcondscl') - if st.form_submit_button("Generate a Caption") or auto_submit("capauto"): - pc = load_data(prog) - col2 = misc_utils.render_pc(pc) - prog.progress(0.5, "Running Generation") - cap = caption.pc_caption(model_b32, pc, cond_scale) - st.text(cap) - prog.progress(1.0, "Idle") - if image_examples(samples_index.cap, 3, example_text="Examples (Choose one of the following 3D shapes)"): - 
queue_auto_submit("capauto") - - -def demo_pc2img(): - with st.form("sdform"): - load_data = misc_utils.input_3d_shape('sd') - prompt = st.text_input("Prompt (Optional)", key='sdtprompt') - noise_scale = st.slider('Variation Level', 0, 5, 1) - cfg_scale = st.slider('Guidance Scale', 0.0, 30.0, 10.0) - steps = st.slider('Diffusion Steps', 8, 50, 25) - width = 640 # st.slider('Width', 480, 640, step=32) - height = 640 # st.slider('Height', 480, 640, step=32) - if st.form_submit_button("Generate") or auto_submit("sdauto"): - pc = load_data(prog) - col2 = misc_utils.render_pc(pc) - prog.progress(0.49, "Running Generation") - if torch.cuda.is_available(): - with sys.clip_move_lock: - clip_model.cpu() - img = sd_pc2img.pc_to_image( - model_l14, pc, prompt, noise_scale, width, height, cfg_scale, steps, - lambda i, t, _: prog.progress(0.49 + i / (steps + 1) / 2, "Running Diffusion Step %d" % i) - ) - if torch.cuda.is_available(): - with sys.clip_move_lock: - clip_model.cuda() - with col2: - st.image(img) - prog.progress(1.0, "Idle") - if image_examples(samples_index.sd, 3, example_text="Examples (Choose one of the following 3D shapes)"): - queue_auto_submit("sdauto") - - -def retrieval_results(results): - st.caption("Click the link to view the 3D shape") - for i in range(len(results) // 4): - cols = st.columns(4) - for j in range(4): - idx = i * 4 + j - if idx >= len(results): - continue - entry = results[idx] - with cols[j]: - ext_link = f"https://objaverse.allenai.org/explore/?query={entry['u']}" - st.image(entry['img']) - # st.markdown(f"[![thumbnail {entry['desc'].replace('\n', ' ')}]({entry['img']})]({ext_link})") - # st.text(entry['name']) - quote_name = entry['name'].replace('[', '\\[').replace(']', '\\]').replace('\n', ' ') - st.markdown(f"[{quote_name}]({ext_link})") - - -def retrieval_filter_expand(key): - with st.expander("Filters"): - sim_th = st.slider("Similarity Threshold", 0.05, 0.5, 0.1, key=key + 'rtsimth') - tag = st.text_input("Has Tag", "", key=key + 'rthastag') - col1, col2 = st.columns(2) - face_min = int(col1.text_input("Face Count Min", "0", key=key + 'rtfcmin')) - face_max = int(col2.text_input("Face Count Max", "34985808", key=key + 'rtfcmax')) - col1, col2 = st.columns(2) - anim_min = int(col1.text_input("Animation Count Min", "0", key=key + 'rtacmin')) - anim_max = int(col2.text_input("Animation Count Max", "563", key=key + 'rtacmax')) - tag_n = not bool(tag.strip()) - anim_n = not (anim_min > 0 or anim_max < 563) - face_n = not (face_min > 0 or face_max < 34985808) - filter_fn = lambda x: ( - (anim_n or anim_min <= x['anims'] <= anim_max) - and (face_n or face_min <= x['faces'] <= face_max) - and (tag_n or tag in x['tags']) - ) - return sim_th, filter_fn - - -def demo_retrieval(): - with tab_text: - with st.form("rtextform"): - k = st.slider("Shapes to Retrieve", 1, 100, 16, key='rtext') - text = st.text_input("Input Text", key="inputrtext") - sim_th, filter_fn = retrieval_filter_expand('text') - if st.form_submit_button("Run with Text") or auto_submit("rtextauto"): - prog.progress(0.49, "Computing Embeddings") - device = clip_model.device - tn = clip_prep( - text=[text], return_tensors='pt', truncation=True, max_length=76 - ).to(device) - enc = clip_model.get_text_features(**tn).float().cpu() - prog.progress(0.7, "Running Retrieval") - retrieval_results(retrieval.retrieve(enc, k, sim_th, filter_fn)) - prog.progress(1.0, "Idle") - picked_sample = st.selectbox("Examples", ["Select..."] + samples_index.retrieval_texts) - text_last_example = 
st.session_state.get('text_last_example', None) - if text_last_example is None: - st.session_state.text_last_example = picked_sample - elif text_last_example != picked_sample and picked_sample != "Select...": - st.session_state.text_last_example = picked_sample - sq("inputrtext", picked_sample) - queue_auto_submit("rtextauto") - - with tab_img: - submit = False - with st.form("rimgform"): - k = st.slider("Shapes to Retrieve", 1, 100, 16, key='rimage') - pic = st.file_uploader("Upload an Image", key='rimageinput') - sim_th, filter_fn = retrieval_filter_expand('image') - if st.form_submit_button("Run with Image"): - submit = True - results_container = st.container() - sample_got = image_examples(samples_index.iret, 4, 'rimageinput') - if sample_got: - pic = sample_got - if sample_got or submit: - img = Image.open(pic) - with results_container: - st.image(img) - prog.progress(0.49, "Computing Embeddings") - device = clip_model.device - tn = clip_prep(images=[img], return_tensors="pt").to(device) - enc = clip_model.get_image_features(pixel_values=tn['pixel_values'].type(half)).float().cpu() - prog.progress(0.7, "Running Retrieval") - retrieval_results(retrieval.retrieve(enc, k, sim_th, filter_fn)) - prog.progress(1.0, "Idle") - - with tab_pc: - with st.form("rpcform"): - k = st.slider("Shapes to Retrieve", 1, 100, 16, key='rpc') - load_data = misc_utils.input_3d_shape('retpc') - sim_th, filter_fn = retrieval_filter_expand('pc') - if st.form_submit_button("Run with Shape") or auto_submit('rpcauto'): - pc = load_data(prog) - col2 = misc_utils.render_pc(pc) - prog.progress(0.49, "Computing Embeddings") - ref_dev = next(model_g14.parameters()).device - enc = model_g14(torch.tensor(pc[:, [0, 2, 1, 3, 4, 5]].T[None], device=ref_dev)).cpu() - prog.progress(0.7, "Running Retrieval") - retrieval_results(retrieval.retrieve(enc, k, sim_th, filter_fn)) - prog.progress(1.0, "Idle") - if image_examples(samples_index.pret, 3): - queue_auto_submit("rpcauto") - - -try: - with tab_cls: - demo_classification() - with tab_cap: - demo_captioning() - with tab_sd: - demo_pc2img() - demo_retrieval() -except Exception: - import traceback - st.error(traceback.format_exc().replace("\n", " \n")) diff --git a/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/trainers/lion_cheetah_search_train.py b/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/trainers/lion_cheetah_search_train.py deleted file mode 100644 index 518056a6b13f90d9415f6cd4deba89c331f948a2..0000000000000000000000000000000000000000 --- a/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/trainers/lion_cheetah_search_train.py +++ /dev/null @@ -1,80 +0,0 @@ - -from fake_face_detection.metrics.compute_metrics import compute_metrics -from fake_face_detection.data.lion_cheetah_collator import lion_cheetah_collator -from transformers import Trainer, TrainingArguments, set_seed -from torch.utils.tensorboard import SummaryWriter -from torch import nn -from typing import * -import numpy as np -import json -import os - -def train(epochs: int, output_dir: str, config: dict, model: nn.Module, trainer, get_datasets: Callable, log_dir: str = "fake_face_logs", metric = 'accuracy', seed: int = 0): - - print("------------------------- Beginning of training") - - set_seed(seed) - - # initialize the model - model = model() - - # reformat the config integer type - for key, value in config.items(): - - if isinstance(value, np.int32): config[key] = int(value) - - pretty = json.dumps(config, indent = 4) - - print(f"Current Config: \n {pretty}") - - 
print(f"Checkpoints in {output_dir}") - - # recuperate the dataset - train_dataset, test_dataset = get_datasets(config['h_flip_p'], config['v_flip_p'], config['gray_scale_p'], config['rotation']) - - # initialize the arguments of the training - training_args = TrainingArguments(output_dir, - per_device_train_batch_size=config['batch_size'], - evaluation_strategy='steps', - save_strategy='steps', - logging_strategy='steps', - num_train_epochs=epochs, - fp16=True, - save_total_limit=2, - remove_unused_columns=True, - push_to_hub=False, - logging_dir=os.path.join(log_dir, os.path.basename(output_dir)), - load_best_model_at_end=True, - learning_rate=config['lr'], - weight_decay=config['weight_decay'] - ) - - # train the model - trainer_ = trainer( - model = model, - args = training_args, - data_collator = lion_cheetah_collator, - compute_metrics = compute_metrics, - train_dataset = train_dataset, - eval_dataset = test_dataset - ) - - # train the model - trainer_.train() - - # evaluate the model and recuperate metrics - metrics = trainer_.evaluate(test_dataset) - - # add metrics and config to the hyperparameter panel of tensorboard - with SummaryWriter(os.path.join(log_dir, 'lchparams')) as logger: - - logger.add_hparams( - config, metrics - ) - - print(metrics) - - print("------------------------- End of training") - # recuperate the metric to evaluate - return metrics[f'eval_{metric}'] - diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/arraymisc/quantization.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/arraymisc/quantization.py deleted file mode 100644 index 8e47a3545780cf071a1ef8195efb0b7b662c8186..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/arraymisc/quantization.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np - - -def quantize(arr, min_val, max_val, levels, dtype=np.int64): - """Quantize an array of (-inf, inf) to [0, levels-1]. - - Args: - arr (ndarray): Input array. - min_val (scalar): Minimum value to be clipped. - max_val (scalar): Maximum value to be clipped. - levels (int): Quantization levels. - dtype (np.type): The type of the quantized array. - - Returns: - tuple: Quantized array. - """ - if not (isinstance(levels, int) and levels > 1): - raise ValueError( - f'levels must be a positive integer, but got {levels}') - if min_val >= max_val: - raise ValueError( - f'min_val ({min_val}) must be smaller than max_val ({max_val})') - - arr = np.clip(arr, min_val, max_val) - min_val - quantized_arr = np.minimum( - np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1) - - return quantized_arr - - -def dequantize(arr, min_val, max_val, levels, dtype=np.float64): - """Dequantize an array. - - Args: - arr (ndarray): Input array. - min_val (scalar): Minimum value to be clipped. - max_val (scalar): Maximum value to be clipped. - levels (int): Quantization levels. - dtype (np.type): The type of the dequantized array. - - Returns: - tuple: Dequantized array. 
- """ - if not (isinstance(levels, int) and levels > 1): - raise ValueError( - f'levels must be a positive integer, but got {levels}') - if min_val >= max_val: - raise ValueError( - f'min_val ({min_val}) must be smaller than max_val ({max_val})') - - dequantized_arr = (arr + 0.5).astype(dtype) * (max_val - - min_val) / levels + min_val - - return dequantized_arr diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/weight_init.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/weight_init.py deleted file mode 100644 index 38141ba3d61f64ddfc0a31574b4648cbad96d7dd..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/weight_init.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Modified from https://github.com/rwightman/pytorch-image- -models/blob/master/timm/models/layers/drop.py.""" - -import math -import warnings - -import torch - - -def _no_grad_trunc_normal_(tensor, mean, std, a, b): - """Reference: https://people.sc.fsu.edu/~jburkardt/presentations - /truncated_normal.pdf""" - - def norm_cdf(x): - # Computes standard normal cumulative distribution function - return (1. + math.erf(x / math.sqrt(2.))) / 2. - - if (mean < a - 2 * std) or (mean > b + 2 * std): - warnings.warn( - 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. ' - 'The distribution of values may be incorrect.', - stacklevel=2) - - with torch.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - lower_bound = norm_cdf((a - mean) / std) - upper_bound = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [l, u], then translate to - # [2l-1, 2u-1]. - tensor.uniform_(2 * lower_bound - 1, 2 * upper_bound - 1) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - tensor.erfinv_() - - # Transform to proper mean, std - tensor.mul_(std * math.sqrt(2.)) - tensor.add_(mean) - - # Clamp to ensure it's in the proper range - tensor.clamp_(min=a, max=b) - return tensor - - -def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): - r"""Fills the input Tensor with values drawn from a truncated - normal distribution. The values are effectively drawn from the - normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` - with values outside :math:`[a, b]` redrawn until they are within - the bounds. The method used for generating the random values works - best when :math:`a \leq \text{mean} \leq b`. 
- Args: - tensor (``torch.Tensor``): an n-dimensional `torch.Tensor` - mean (float): the mean of the normal distribution - std (float): the standard deviation of the normal distribution - a (float): the minimum cutoff value - b (float): the maximum cutoff value - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/datasets/pipelines/loading.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/datasets/pipelines/loading.py deleted file mode 100644 index d3692ae91f19b9c7ccf6023168788ff42c9e93e3..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/datasets/pipelines/loading.py +++ /dev/null @@ -1,153 +0,0 @@ -import os.path as osp - -import annotator.uniformer.mmcv as mmcv -import numpy as np - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class LoadImageFromFile(object): - """Load an image from file. - - Required keys are "img_prefix" and "img_info" (a dict that must contain the - key "filename"). Added or updated keys are "filename", "img", "img_shape", - "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), - "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). - - Args: - to_float32 (bool): Whether to convert the loaded image to a float32 - numpy array. If set to False, the loaded image is an uint8 array. - Defaults to False. - color_type (str): The flag argument for :func:`mmcv.imfrombytes`. - Defaults to 'color'. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default: - 'cv2' - """ - - def __init__(self, - to_float32=False, - color_type='color', - file_client_args=dict(backend='disk'), - imdecode_backend='cv2'): - self.to_float32 = to_float32 - self.color_type = color_type - self.file_client_args = file_client_args.copy() - self.file_client = None - self.imdecode_backend = imdecode_backend - - def __call__(self, results): - """Call functions to load image and get image meta information. - - Args: - results (dict): Result dict from :obj:`mmseg.CustomDataset`. - - Returns: - dict: The dict contains loaded image and meta information. 
- """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results.get('img_prefix') is not None: - filename = osp.join(results['img_prefix'], - results['img_info']['filename']) - else: - filename = results['img_info']['filename'] - img_bytes = self.file_client.get(filename) - img = mmcv.imfrombytes( - img_bytes, flag=self.color_type, backend=self.imdecode_backend) - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = filename - results['ori_filename'] = results['img_info']['filename'] - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - num_channels = 1 if len(img.shape) < 3 else img.shape[2] - results['img_norm_cfg'] = dict( - mean=np.zeros(num_channels, dtype=np.float32), - std=np.ones(num_channels, dtype=np.float32), - to_rgb=False) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(to_float32={self.to_float32},' - repr_str += f"color_type='{self.color_type}'," - repr_str += f"imdecode_backend='{self.imdecode_backend}')" - return repr_str - - -@PIPELINES.register_module() -class LoadAnnotations(object): - """Load annotations for semantic segmentation. - - Args: - reduce_zero_label (bool): Whether reduce all label value by 1. - Usually used for datasets where 0 is background label. - Default: False. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default: - 'pillow' - """ - - def __init__(self, - reduce_zero_label=False, - file_client_args=dict(backend='disk'), - imdecode_backend='pillow'): - self.reduce_zero_label = reduce_zero_label - self.file_client_args = file_client_args.copy() - self.file_client = None - self.imdecode_backend = imdecode_backend - - def __call__(self, results): - """Call function to load multiple types annotations. - - Args: - results (dict): Result dict from :obj:`mmseg.CustomDataset`. - - Returns: - dict: The dict contains loaded semantic segmentation annotations. 
- """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results.get('seg_prefix', None) is not None: - filename = osp.join(results['seg_prefix'], - results['ann_info']['seg_map']) - else: - filename = results['ann_info']['seg_map'] - img_bytes = self.file_client.get(filename) - gt_semantic_seg = mmcv.imfrombytes( - img_bytes, flag='unchanged', - backend=self.imdecode_backend).squeeze().astype(np.uint8) - # modify if custom classes - if results.get('label_map', None) is not None: - for old_id, new_id in results['label_map'].items(): - gt_semantic_seg[gt_semantic_seg == old_id] = new_id - # reduce zero_label - if self.reduce_zero_label: - # avoid using underflow conversion - gt_semantic_seg[gt_semantic_seg == 0] = 255 - gt_semantic_seg = gt_semantic_seg - 1 - gt_semantic_seg[gt_semantic_seg == 254] = 255 - results['gt_semantic_seg'] = gt_semantic_seg - results['seg_fields'].append('gt_semantic_seg') - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(reduce_zero_label={self.reduce_zero_label},' - repr_str += f"imdecode_backend='{self.imdecode_backend}')" - return repr_str diff --git a/spaces/Pluviophile/vits-uma-genshin-honkai/transforms.py b/spaces/Pluviophile/vits-uma-genshin-honkai/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/Pluviophile/vits-uma-genshin-honkai/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - 
logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * 
theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/docs/AUDIOGEN.md b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/docs/AUDIOGEN.md deleted file mode 100644 index a0ff481190fb52fe865aa66aaaa10176f7cf995c..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/docs/AUDIOGEN.md +++ /dev/null @@ -1,158 +0,0 @@ -# AudioGen: Textually-guided audio generation - -AudioCraft provides the code and a model re-implementing AudioGen, a [textually-guided audio generation][audiogen_arxiv] -model that performs text-to-sound generation. - -The provided AudioGen reimplementation follows the LM model architecture introduced in [MusicGen][musicgen_arxiv] -and is a single stage auto-regressive Transformer model trained over a 16kHz -
EnCodec tokenizer with 4 codebooks sampled at 50 Hz.
-This model variant reaches audio quality similar to the original implementation introduced in the AudioGen publication
-while providing faster generation speed given the smaller frame rate.
-
-**Important note:** The provided models are NOT the original models used to report numbers in the
-[AudioGen publication][audiogen_arxiv]. Refer to the model card to learn more about architectural changes.
-
-Listen to samples from the **original AudioGen implementation** in our [sample page][audiogen_samples].
-
-
-## Model Card
-
-See [the model card](../model_cards/AUDIOGEN_MODEL_CARD.md).
-
-
-## Installation
-
-Please follow the AudioCraft installation instructions from the [README](../README.md).
-
-AudioCraft requires a GPU with at least 16 GB of memory for running inference with the medium-sized models (~1.5B parameters).
-
-## API and usage
-
-We provide a simple API and one pre-trained model for AudioGen:
-
-`facebook/audiogen-medium`: 1.5B model, text to sound - [🤗 Hub](https://huggingface.co/facebook/audiogen-medium)
-
-You can play with AudioGen by running the jupyter notebook at [`demos/audiogen_demo.ipynb`](../demos/audiogen_demo.ipynb) locally (if you have a GPU).
-
-A quick example of using the API follows.
-
-```python
-import torchaudio
-from audiocraft.models import AudioGen
-from audiocraft.data.audio import audio_write
-
-model = AudioGen.get_pretrained('facebook/audiogen-medium')
-model.set_generation_params(duration=5)  # generate 5 seconds.
-descriptions = ['dog barking', 'siren of an emergency vehicle', 'footsteps in a corridor']
-wav = model.generate(descriptions)  # generates 3 samples.
-
-for idx, one_wav in enumerate(wav):
-    # Will save under {idx}.wav, with loudness normalization at -14 dB LUFS.
-    audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True)
-```
-
-## Training
-
-The [AudioGenSolver](../audiocraft/solvers/audiogen.py) implements AudioGen's training pipeline
-used to develop the released model. Note that this may not fully reproduce the results presented in the paper.
-Similarly to MusicGen, it defines an autoregressive language modeling task over multiple streams of
-discrete tokens extracted from a pre-trained EnCodec model (see the [EnCodec documentation](./ENCODEC.md)
-for more details on how to train such a model) with dataset-specific changes for environmental sound
-processing.
-
-Note that **we do NOT provide any of the datasets** used for training AudioGen.
-
-### Example configurations and grids
-
-We provide configurations to reproduce the released models and our research.
-AudioGen solver configurations are available in [config/solver/audiogen](../config/solver/audiogen).
-The base training configuration used for the released models is the following:
-[`solver=audiogen/audiogen_base_16khz`](../config/solver/audiogen/audiogen_base_16khz.yaml).
-
-Please find some example grids to train AudioGen at
-[audiocraft/grids/audiogen](../audiocraft/grids/audiogen/).
-
-```shell
-# text-to-sound
-dora grid audiogen.audiogen_base_16khz
-```
-
-### Sound dataset and metadata
-
-AudioGen's underlying dataset is an AudioDataset augmented with description metadata.
-The AudioGen dataset implementation expects the metadata to be available as `.json` files
-at the same location as the audio files, or in a specified external folder.
-Learn more in the [datasets section](./DATASETS.md).
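-
-For illustration, here is a minimal sketch of how such sidecar metadata could be laid out
-(the paths and the exact set of fields are assumptions made for this example; refer to the
-datasets documentation for the authoritative format):
-
-```python
-import json
-from pathlib import Path
-
-# Hypothetical dataset root; one .json file sits next to each audio file.
-audio_dir = Path("egs/my_sounds/train")
-for wav_path in audio_dir.glob("*.wav"):
-    meta = {"description": "dog barking in the distance"}  # caption used for text conditioning
-    wav_path.with_suffix(".json").write_text(json.dumps(meta))
-```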
- -### Evaluation stage - -By default, the evaluation stage computes the cross-entropy and the perplexity over the -evaluation dataset. The objective metrics used for evaluation are not enabled by default, as they can be costly to run -or require some extra dependencies. Please refer to the [metrics documentation](./METRICS.md) -for more details on the requirements for each metric. - -We provide an off-the-shelf configuration to enable running the objective metrics -for audio generation in -[config/solver/audiogen/evaluation/objective_eval](../config/solver/audiogen/evaluation/objective_eval.yaml). - -One can then activate evaluation as follows: -```shell -# using the configuration -dora run solver=audiogen/debug solver/audiogen/evaluation=objective_eval -# specifying each of the fields, e.g. to activate KL computation -dora run solver=audiogen/debug evaluate.metrics.kld=true -``` - -See [an example evaluation grid](../audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py). - -### Generation stage - -The generation stage allows generating samples conditionally and/or unconditionally, as well as performing -audio continuation (from a prompt). We currently support greedy sampling (argmax), sampling -from softmax with a given temperature, top-K and top-P (nucleus) sampling. The number of samples -generated and the batch size used are controlled by the `dataset.generate` configuration, -while the other generation parameters are defined in `generate.lm`. - -```shell -# control sampling parameters -dora run solver=audiogen/debug generate.lm.gen_duration=5 generate.lm.use_sampling=true generate.lm.top_k=15 -``` - -## More information - -Refer to [MusicGen's instructions](./MUSICGEN.md). - -### Learn more - -Learn more about AudioCraft training pipelines in the [dedicated section](./TRAINING.md). - - -## Citation - -AudioGen -``` -@article{kreuk2022audiogen, -    title={Audiogen: Textually guided audio generation}, -    author={Kreuk, Felix and Synnaeve, Gabriel and Polyak, Adam and Singer, Uriel and D{\'e}fossez, Alexandre and Copet, Jade and Parikh, Devi and Taigman, Yaniv and Adi, Yossi}, -    journal={arXiv preprint arXiv:2209.15352}, -    year={2022} -} -``` - -MusicGen -``` -@article{copet2023simple, -    title={Simple and Controllable Music Generation}, -    author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez}, -    year={2023}, -    journal={arXiv preprint arXiv:2306.05284}, -} -``` - -## License - -See license information in the [model card](../model_cards/AUDIOGEN_MODEL_CARD.md). - -[audiogen_arxiv]: https://arxiv.org/abs/2209.15352 -[musicgen_arxiv]: https://arxiv.org/abs/2306.05284 -[audiogen_samples]: https://felixkreuk.github.io/audiogen/ diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/losses/test_losses.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/losses/test_losses.py deleted file mode 100644 index b6681e12c453dea5aeba738ab252d1923b7e0941..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/losses/test_losses.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree.
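- -# These tests run the audiocraft losses on random waveforms: every loss should -# return a torch.Tensor, and the Mel-spectrogram losses should be exactly zero -# when both inputs are identical.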
- -import random - -import torch - -from audiocraft.losses import ( - MelSpectrogramL1Loss, - MultiScaleMelSpectrogramLoss, - MRSTFTLoss, - SISNR, - STFTLoss, -) - - -def test_mel_l1_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - mel_l1 = MelSpectrogramL1Loss(sample_rate=22_050) - loss = mel_l1(t1, t2) - loss_same = mel_l1(t1, t1) - - assert isinstance(loss, torch.Tensor) - assert isinstance(loss_same, torch.Tensor) - assert loss_same.item() == 0.0 - - -def test_msspec_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - msspec = MultiScaleMelSpectrogramLoss(sample_rate=22_050) - loss = msspec(t1, t2) - loss_same = msspec(t1, t1) - - assert isinstance(loss, torch.Tensor) - assert isinstance(loss_same, torch.Tensor) - assert loss_same.item() == 0.0 - - -def test_mrstft_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - mrstft = MRSTFTLoss() - loss = mrstft(t1, t2) - - assert isinstance(loss, torch.Tensor) - - -def test_sisnr_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - sisnr = SISNR() - loss = sisnr(t1, t2) - - assert isinstance(loss, torch.Tensor) - - -def test_stft_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - mrstft = STFTLoss() - loss = mrstft(t1, t2) - - assert isinstance(loss, torch.Tensor) diff --git a/spaces/Raspberry-ai/main/.env/bin/Activate.ps1 b/spaces/Raspberry-ai/main/.env/bin/Activate.ps1 deleted file mode 100644 index eeea3583fa130d4702a05012a2103152daf51487..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/bin/Activate.ps1 +++ /dev/null @@ -1,247 +0,0 @@ -<# -.Synopsis -Activate a Python virtual environment for the current PowerShell session. - -.Description -Pushes the python executable for a virtual environment to the front of the -$Env:PATH environment variable and sets the prompt to signify that you are -in a Python virtual environment. Makes use of the command line switches as -well as the `pyvenv.cfg` file values present in the virtual environment. - -.Parameter VenvDir -Path to the directory that contains the virtual environment to activate. The -default value for this is the parent of the directory that the Activate.ps1 -script is located within. - -.Parameter Prompt -The prompt prefix to display when this virtual environment is activated. By -default, this prompt is the name of the virtual environment folder (VenvDir) -surrounded by parentheses and followed by a single space (ie. '(.venv) '). - -.Example -Activate.ps1 -Activates the Python virtual environment that contains the Activate.ps1 script. - -.Example -Activate.ps1 -Verbose -Activates the Python virtual environment that contains the Activate.ps1 script, -and shows extra information about the activation as it executes. - -.Example -Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv -Activates the Python virtual environment located in the specified location. - -.Example -Activate.ps1 -Prompt "MyPython" -Activates the Python virtual environment that contains the Activate.ps1 script, -and prefixes the current prompt with the specified string (surrounded in -parentheses) while the virtual environment is active. - -.Notes -On Windows, it may be required to enable this Activate.ps1 script by setting the -execution policy for the user. 
You can do this by issuing the following PowerShell -command: - -PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser - -For more information on Execution Policies: -https://go.microsoft.com/fwlink/?LinkID=135170 - -#> -Param( - [Parameter(Mandatory = $false)] - [String] - $VenvDir, - [Parameter(Mandatory = $false)] - [String] - $Prompt -) - -<# Function declarations --------------------------------------------------- #> - -<# -.Synopsis -Remove all shell session elements added by the Activate script, including the -addition of the virtual environment's Python executable from the beginning of -the PATH variable. - -.Parameter NonDestructive -If present, do not remove this function from the global namespace for the -session. - -#> -function global:deactivate ([switch]$NonDestructive) { - # Revert to original values - - # The prior prompt: - if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { - Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt - Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT - } - - # The prior PYTHONHOME: - if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { - Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME - Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME - } - - # The prior PATH: - if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { - Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH - Remove-Item -Path Env:_OLD_VIRTUAL_PATH - } - - # Just remove the VIRTUAL_ENV altogether: - if (Test-Path -Path Env:VIRTUAL_ENV) { - Remove-Item -Path env:VIRTUAL_ENV - } - - # Just remove VIRTUAL_ENV_PROMPT altogether. - if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { - Remove-Item -Path env:VIRTUAL_ENV_PROMPT - } - - # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: - if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { - Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force - } - - # Leave deactivate function in the global namespace if requested: - if (-not $NonDestructive) { - Remove-Item -Path function:deactivate - } -} - -<# -.Description -Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the -given folder, and returns them in a map. - -For each line in the pyvenv.cfg file, if that line can be parsed into exactly -two strings separated by `=` (with any amount of whitespace surrounding the =) -then it is considered a `key = value` line. The left hand string is the key, -the right hand is the value. - -If the value starts with a `'` or a `"` then the first and last character is -stripped from the value before being captured. - -.Parameter ConfigDir -Path to the directory that contains the `pyvenv.cfg` file. -#> -function Get-PyVenvConfig( - [String] - $ConfigDir -) { - Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" - - # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). - $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue - - # An empty map will be returned if no config file is found. - $pyvenvConfig = @{ } - - if ($pyvenvConfigPath) { - - Write-Verbose "File exists, parse `key = value` lines" - $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath - - $pyvenvConfigContent | ForEach-Object { - $keyval = $PSItem -split "\s*=\s*", 2 - if ($keyval[0] -and $keyval[1]) { - $val = $keyval[1] - - # Remove extraneous quotations around a string value. 
- if ("'""".Contains($val.Substring(0, 1))) { - $val = $val.Substring(1, $val.Length - 2) - } - - $pyvenvConfig[$keyval[0]] = $val - Write-Verbose "Adding Key: '$($keyval[0])'='$val'" - } - } - } - return $pyvenvConfig -} - - -<# Begin Activate script --------------------------------------------------- #> - -# Determine the containing directory of this script -$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition -$VenvExecDir = Get-Item -Path $VenvExecPath - -Write-Verbose "Activation script is located in path: '$VenvExecPath'" -Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" -Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" - -# Set values required in priority: CmdLine, ConfigFile, Default -# First, get the location of the virtual environment, it might not be -# VenvExecDir if specified on the command line. -if ($VenvDir) { - Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" -} -else { - Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." - $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") - Write-Verbose "VenvDir=$VenvDir" -} - -# Next, read the `pyvenv.cfg` file to determine any required value such -# as `prompt`. -$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir - -# Next, set the prompt from the command line, or the config file, or -# just use the name of the virtual environment folder. -if ($Prompt) { - Write-Verbose "Prompt specified as argument, using '$Prompt'" -} -else { - Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" - if ($pyvenvCfg -and $pyvenvCfg['prompt']) { - Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" - $Prompt = $pyvenvCfg['prompt']; - } - else { - Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" - Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" - $Prompt = Split-Path -Path $venvDir -Leaf - } -} - -Write-Verbose "Prompt = '$Prompt'" -Write-Verbose "VenvDir='$VenvDir'" - -# Deactivate any currently active virtual environment, but leave the -# deactivate function in place. -deactivate -nondestructive - -# Now set the environment variable VIRTUAL_ENV, used by many tools to determine -# that there is an activated venv. 
-$env:VIRTUAL_ENV = $VenvDir - -if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { - -    Write-Verbose "Setting prompt to '$Prompt'" - -    # Set the prompt to include the env name -    # Make sure _OLD_VIRTUAL_PROMPT is global -    function global:_OLD_VIRTUAL_PROMPT { "" } -    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT -    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt - -    function global:prompt { -        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " -        _OLD_VIRTUAL_PROMPT -    } -    $env:VIRTUAL_ENV_PROMPT = $Prompt -} - -# Clear PYTHONHOME -if (Test-Path -Path Env:PYTHONHOME) { -    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME -    Remove-Item -Path Env:PYTHONHOME -} - -# Add the venv to the PATH -Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH -$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/pager.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/pager.py deleted file mode 100644 index a3f7aa62af1ee2690e1e17ee41f3c368953625b8..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/pager.py +++ /dev/null @@ -1,34 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any - - -class Pager(ABC): -    """Base class for a pager.""" - -    @abstractmethod -    def show(self, content: str) -> None: -        """Show content in pager. - -        Args: -            content (str): Content to be displayed. -        """ - - -class SystemPager(Pager): -    """Uses the pager installed on the system.""" - -    def _pager(self, content: str) -> Any:  # pragma: no cover -        return __import__("pydoc").pager(content) - -    def show(self, content: str) -> None: -        """Use the same pager used by pydoc.""" -        self._pager(content) - - -if __name__ == "__main__":  # pragma: no cover -    from .__main__ import make_test_card -    from .console import Console - -    console = Console() -    with console.pager(styles=True): -        console.print(make_test_card()) diff --git a/spaces/Rbrq/DeticChatGPT/detic/data/datasets/register_oid.py b/spaces/Rbrq/DeticChatGPT/detic/data/datasets/register_oid.py deleted file mode 100644 index bd281f53f07074740b453838ba32f42f81a28383..0000000000000000000000000000000000000000 --- a/spaces/Rbrq/DeticChatGPT/detic/data/datasets/register_oid.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Xingyi Zhou from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/coco.py -import copy -import io -import logging -import contextlib -import os -import datetime -import json -import numpy as np - -from PIL import Image - -from fvcore.common.timer import Timer -from fvcore.common.file_io import PathManager, file_lock -from detectron2.structures import BoxMode, PolygonMasks, Boxes -from detectron2.data import DatasetCatalog, MetadataCatalog - -logger = logging.getLogger(__name__) - -""" -This file contains functions to register an OpenImages (OID) dataset in COCO format to the DatasetCatalog. -""" - -__all__ = ["register_oid_instances", "load_coco_json_mem_efficient"] - - - -def register_oid_instances(name, metadata, json_file, image_root): -    """ -    Register an OID dataset in COCO json format to the DatasetCatalog. -    """ -    # 1.
register a function which returns dicts -    DatasetCatalog.register(name, lambda: load_coco_json_mem_efficient( -        json_file, image_root, name)) - -    # 2. Optionally, add metadata about this dataset, -    # since it might be useful in evaluation, visualization or logging -    MetadataCatalog.get(name).set( -        json_file=json_file, image_root=image_root, evaluator_type="oid", **metadata -    ) - - -def load_coco_json_mem_efficient(json_file, image_root, dataset_name=None, extra_annotation_keys=None): -    """ -    Load a COCO-format json and convert it to dataset dicts. -    (Despite the name, this implementation is not actually memory efficient.) -    """ -    from pycocotools.coco import COCO - -    timer = Timer() -    json_file = PathManager.get_local_path(json_file) -    with contextlib.redirect_stdout(io.StringIO()): -        coco_api = COCO(json_file) -    if timer.seconds() > 1: -        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) - -    id_map = None -    if dataset_name is not None: -        meta = MetadataCatalog.get(dataset_name) -        cat_ids = sorted(coco_api.getCatIds()) -        cats = coco_api.loadCats(cat_ids) -        # The categories in a custom json file may not be sorted. -        thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] -        meta.thing_classes = thing_classes - -        if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): -            if "coco" not in dataset_name: -                logger.warning( -                    """ -                    Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. -                    """ -                ) -            id_map = {v: i for i, v in enumerate(cat_ids)} -            meta.thing_dataset_id_to_contiguous_id = id_map - -    # sort indices for reproducible results -    img_ids = sorted(coco_api.imgs.keys()) -    imgs = coco_api.loadImgs(img_ids) -    logger.info("Loaded {} images in COCO format from {}".format(len(imgs), json_file)) - -    dataset_dicts = [] -    num_instances_without_valid_segmentation = 0 - -    ann_keys = ["iscrowd", "bbox", "category_id"] + (extra_annotation_keys or []) - -    for img_dict in imgs: -        record = {} -        record["file_name"] = os.path.join(image_root, img_dict["file_name"]) -        record["height"] = img_dict["height"] -        record["width"] = img_dict["width"] -        image_id = record["image_id"] = img_dict["id"] -        anno_dict_list = coco_api.imgToAnns[image_id] -        if 'neg_category_ids' in img_dict: -            record['neg_category_ids'] = \ -                [id_map[x] for x in img_dict['neg_category_ids']] - -        objs = [] -        for anno in anno_dict_list: -            assert anno["image_id"] == image_id - -            assert anno.get("ignore", 0) == 0 - -            obj = {key: anno[key] for key in ann_keys if key in anno} - -            segm = anno.get("segmentation", None) -            if segm:  # either list[list[float]] or dict(RLE) -                if not isinstance(segm, dict): -                    # filter out invalid polygons (< 3 points) -                    segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] -                    if len(segm) == 0: -                        num_instances_without_valid_segmentation += 1 -                        continue  # ignore this instance -                obj["segmentation"] = segm - -            obj["bbox_mode"] = BoxMode.XYWH_ABS - -            if id_map: -                obj["category_id"] = id_map[obj["category_id"]] -            objs.append(obj) -        record["annotations"] = objs -        dataset_dicts.append(record) - -    if num_instances_without_valid_segmentation > 0: -        logger.warning("Filtered out {} instances without valid segmentation.".format(num_instances_without_valid_segmentation)) - -    del coco_api -    return dataset_dicts \ No newline at end of file diff --git a/spaces/Realcat/image-matching-webui/hloc/matchers/superglue.py b/spaces/Realcat/image-matching-webui/hloc/matchers/superglue.py deleted file mode 100644 index 7e427f4908f9af676d0627643393a8090c40a00a..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/hloc/matchers/superglue.py +++ /dev/null @@ -1,31 +0,0 @@ -import sys -from pathlib import Path - -from ..utils.base_model import BaseModel - -sys.path.append(str(Path(__file__).parent / "../../third_party")) -from
SuperGluePretrainedNetwork.models.superglue import SuperGlue as SG - - -class SuperGlue(BaseModel): - default_conf = { - "weights": "outdoor", - "sinkhorn_iterations": 100, - "match_threshold": 0.2, - } - required_inputs = [ - "image0", - "keypoints0", - "scores0", - "descriptors0", - "image1", - "keypoints1", - "scores1", - "descriptors1", - ] - - def _init(self, conf): - self.net = SG(conf) - - def _forward(self, data): - return self.net(data) diff --git a/spaces/Realcat/image-matching-webui/third_party/ALIKE/alnet.py b/spaces/Realcat/image-matching-webui/third_party/ALIKE/alnet.py deleted file mode 100644 index 91cb7ee55e502895e7b0037f2add1a35a613cd40..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ALIKE/alnet.py +++ /dev/null @@ -1,194 +0,0 @@ -import torch -from torch import nn -from torchvision.models import resnet -from typing import Optional, Callable - - -class ConvBlock(nn.Module): - def __init__( - self, - in_channels, - out_channels, - gate: Optional[Callable[..., nn.Module]] = None, - norm_layer: Optional[Callable[..., nn.Module]] = None, - ): - super().__init__() - if gate is None: - self.gate = nn.ReLU(inplace=True) - else: - self.gate = gate - if norm_layer is None: - norm_layer = nn.BatchNorm2d - self.conv1 = resnet.conv3x3(in_channels, out_channels) - self.bn1 = norm_layer(out_channels) - self.conv2 = resnet.conv3x3(out_channels, out_channels) - self.bn2 = norm_layer(out_channels) - - def forward(self, x): - x = self.gate(self.bn1(self.conv1(x))) # B x in_channels x H x W - x = self.gate(self.bn2(self.conv2(x))) # B x out_channels x H x W - return x - - -# copied from torchvision\models\resnet.py#27->BasicBlock -class ResBlock(nn.Module): - expansion: int = 1 - - def __init__( - self, - inplanes: int, - planes: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - groups: int = 1, - base_width: int = 64, - dilation: int = 1, - gate: Optional[Callable[..., nn.Module]] = None, - norm_layer: Optional[Callable[..., nn.Module]] = None, - ) -> None: - super(ResBlock, self).__init__() - if gate is None: - self.gate = nn.ReLU(inplace=True) - else: - self.gate = gate - if norm_layer is None: - norm_layer = nn.BatchNorm2d - if groups != 1 or base_width != 64: - raise ValueError("ResBlock only supports groups=1 and base_width=64") - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in ResBlock") - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - self.conv1 = resnet.conv3x3(inplanes, planes, stride) - self.bn1 = norm_layer(planes) - self.conv2 = resnet.conv3x3(planes, planes) - self.bn2 = norm_layer(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x: torch.Tensor) -> torch.Tensor: - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.gate(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.gate(out) - - return out - - -class ALNet(nn.Module): - def __init__( - self, - c1: int = 32, - c2: int = 64, - c3: int = 128, - c4: int = 128, - dim: int = 128, - single_head: bool = True, - ): - super().__init__() - - self.gate = nn.ReLU(inplace=True) - - self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) - self.pool4 = nn.MaxPool2d(kernel_size=4, stride=4) - - self.block1 = ConvBlock(3, c1, self.gate, nn.BatchNorm2d) - - self.block2 = ResBlock( - inplanes=c1, - planes=c2, - stride=1, - downsample=nn.Conv2d(c1, c2, 1), - 
gate=self.gate, - norm_layer=nn.BatchNorm2d, - ) - self.block3 = ResBlock( - inplanes=c2, - planes=c3, - stride=1, - downsample=nn.Conv2d(c2, c3, 1), - gate=self.gate, - norm_layer=nn.BatchNorm2d, - ) - self.block4 = ResBlock( - inplanes=c3, - planes=c4, - stride=1, - downsample=nn.Conv2d(c3, c4, 1), - gate=self.gate, - norm_layer=nn.BatchNorm2d, - ) - - # ================================== feature aggregation - self.conv1 = resnet.conv1x1(c1, dim // 4) - self.conv2 = resnet.conv1x1(c2, dim // 4) - self.conv3 = resnet.conv1x1(c3, dim // 4) - self.conv4 = resnet.conv1x1(dim, dim // 4) - self.upsample2 = nn.Upsample( - scale_factor=2, mode="bilinear", align_corners=True - ) - self.upsample4 = nn.Upsample( - scale_factor=4, mode="bilinear", align_corners=True - ) - self.upsample8 = nn.Upsample( - scale_factor=8, mode="bilinear", align_corners=True - ) - self.upsample32 = nn.Upsample( - scale_factor=32, mode="bilinear", align_corners=True - ) - - # ================================== detector and descriptor head - self.single_head = single_head - if not self.single_head: - self.convhead1 = resnet.conv1x1(dim, dim) - self.convhead2 = resnet.conv1x1(dim, dim + 1) - - def forward(self, image): - # ================================== feature encoder - x1 = self.block1(image) # B x c1 x H x W - x2 = self.pool2(x1) - x2 = self.block2(x2) # B x c2 x H/2 x W/2 - x3 = self.pool4(x2) - x3 = self.block3(x3) # B x c3 x H/8 x W/8 - x4 = self.pool4(x3) - x4 = self.block4(x4) # B x dim x H/32 x W/32 - - # ================================== feature aggregation - x1 = self.gate(self.conv1(x1)) # B x dim//4 x H x W - x2 = self.gate(self.conv2(x2)) # B x dim//4 x H//2 x W//2 - x3 = self.gate(self.conv3(x3)) # B x dim//4 x H//8 x W//8 - x4 = self.gate(self.conv4(x4)) # B x dim//4 x H//32 x W//32 - x2_up = self.upsample2(x2) # B x dim//4 x H x W - x3_up = self.upsample8(x3) # B x dim//4 x H x W - x4_up = self.upsample32(x4) # B x dim//4 x H x W - x1234 = torch.cat([x1, x2_up, x3_up, x4_up], dim=1) - - # ================================== detector and descriptor head - if not self.single_head: - x1234 = self.gate(self.convhead1(x1234)) - x = self.convhead2(x1234) # B x dim+1 x H x W - - descriptor_map = x[:, :-1, :, :] - scores_map = torch.sigmoid(x[:, -1, :, :]).unsqueeze(1) - - return scores_map, descriptor_map - - -if __name__ == "__main__": - from thop import profile - - net = ALNet(c1=16, c2=32, c3=64, c4=128, dim=128, single_head=True) - - image = torch.randn(1, 3, 640, 480) - flops, params = profile(net, inputs=(image,), verbose=False) - print("{:<30} {:<8} GFLops".format("Computational complexity: ", flops / 1e9)) - print("{:<30} {:<8} KB".format("Number of parameters: ", params / 1e3)) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/losses/focal_loss.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/losses/focal_loss.py deleted file mode 100644 index 493907c6984d532175e0351daf2eafe4b9ff0256..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/losses/focal_loss.py +++ /dev/null @@ -1,181 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss - -from ..builder import LOSSES -from .utils import weight_reduce_loss - - -# This method is only for debugging -def py_sigmoid_focal_loss(pred, - target, - weight=None, - gamma=2.0, - alpha=0.25, - reduction='mean', - avg_factor=None): - """PyTorch version of 
`Focal Loss `_. - - Args: - pred (torch.Tensor): The prediction with shape (N, C), C is the - number of classes - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - """ - pred_sigmoid = pred.sigmoid() - target = target.type_as(pred) - pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) - focal_weight = (alpha * target + (1 - alpha) * - (1 - target)) * pt.pow(gamma) - loss = F.binary_cross_entropy_with_logits( - pred, target, reduction='none') * focal_weight - if weight is not None: - if weight.shape != loss.shape: - if weight.size(0) == loss.size(0): - # For most cases, weight is of shape (num_priors, ), - # which means it does not have the second axis num_class - weight = weight.view(-1, 1) - else: - # Sometimes, weight per anchor per class is also needed. e.g. - # in FSAF. But it may be flattened of shape - # (num_priors x num_class, ), while loss is still of shape - # (num_priors, num_class). - assert weight.numel() == loss.numel() - weight = weight.view(loss.size(0), -1) - assert weight.ndim == loss.ndim - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - -def sigmoid_focal_loss(pred, - target, - weight=None, - gamma=2.0, - alpha=0.25, - reduction='mean', - avg_factor=None): - r"""A warpper of cuda version `Focal Loss - `_. - - Args: - pred (torch.Tensor): The prediction with shape (N, C), C is the number - of classes. - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - """ - # Function.apply does not accept keyword arguments, so the decorator - # "weighted_loss" is not applicable - loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None, - 'none') - if weight is not None: - if weight.shape != loss.shape: - if weight.size(0) == loss.size(0): - # For most cases, weight is of shape (num_priors, ), - # which means it does not have the second axis num_class - weight = weight.view(-1, 1) - else: - # Sometimes, weight per anchor per class is also needed. e.g. - # in FSAF. But it may be flattened of shape - # (num_priors x num_class, ), while loss is still of shape - # (num_priors, num_class). - assert weight.numel() == loss.numel() - weight = weight.view(loss.size(0), -1) - assert weight.ndim == loss.ndim - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - -@LOSSES.register_module() -class FocalLoss(nn.Module): - - def __init__(self, - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - reduction='mean', - loss_weight=1.0): - """`Focal Loss `_ - - Args: - use_sigmoid (bool, optional): Whether to the prediction is - used for sigmoid or softmax. Defaults to True. 
- gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. Options are "none", "mean" and - "sum". - loss_weight (float, optional): Weight of loss. Defaults to 1.0. - """ - super(FocalLoss, self).__init__() - assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' - self.use_sigmoid = use_sigmoid - self.gamma = gamma - self.alpha = alpha - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Options are "none", "mean" and "sum". - - Returns: - torch.Tensor: The calculated loss - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if self.use_sigmoid: - if torch.cuda.is_available() and pred.is_cuda: - calculate_loss_func = sigmoid_focal_loss - else: - num_classes = pred.size(1) - target = F.one_hot(target, num_classes=num_classes + 1) - target = target[:, :num_classes] - calculate_loss_func = py_sigmoid_focal_loss - - loss_cls = self.loss_weight * calculate_loss_func( - pred, - target, - weight, - gamma=self.gamma, - alpha=self.alpha, - reduction=reduction, - avg_factor=avg_factor) - - else: - raise NotImplementedError - return loss_cls diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/nms.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/nms.py deleted file mode 100644 index 6d9634281f486ab284091786886854c451368052..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/nms.py +++ /dev/null @@ -1,417 +0,0 @@ -import os - -import numpy as np -import torch - -from annotator.uniformer.mmcv.utils import deprecated_api_warning -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['nms', 'softnms', 'nms_match', 'nms_rotated']) - - -# This function is modified from: https://github.com/pytorch/vision/ -class NMSop(torch.autograd.Function): - - @staticmethod - def forward(ctx, bboxes, scores, iou_threshold, offset, score_threshold, - max_num): - is_filtering_by_score = score_threshold > 0 - if is_filtering_by_score: - valid_mask = scores > score_threshold - bboxes, scores = bboxes[valid_mask], scores[valid_mask] - valid_inds = torch.nonzero( - valid_mask, as_tuple=False).squeeze(dim=1) - - inds = ext_module.nms( - bboxes, scores, iou_threshold=float(iou_threshold), offset=offset) - - if max_num > 0: - inds = inds[:max_num] - if is_filtering_by_score: - inds = valid_inds[inds] - return inds - - @staticmethod - def symbolic(g, bboxes, scores, iou_threshold, offset, score_threshold, - max_num): - from ..onnx import is_custom_op_loaded - has_custom_op = is_custom_op_loaded() - # TensorRT nms plugin is aligned with original nms in ONNXRuntime - is_trt_backend = os.environ.get('ONNX_BACKEND') == 
'MMCVTensorRT' - if has_custom_op and (not is_trt_backend): - return g.op( - 'mmcv::NonMaxSuppression', - bboxes, - scores, - iou_threshold_f=float(iou_threshold), - offset_i=int(offset)) - else: - from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze - from ..onnx.onnx_utils.symbolic_helper import _size_helper - - boxes = unsqueeze(g, bboxes, 0) - scores = unsqueeze(g, unsqueeze(g, scores, 0), 0) - - if max_num > 0: - max_num = g.op( - 'Constant', - value_t=torch.tensor(max_num, dtype=torch.long)) - else: - dim = g.op('Constant', value_t=torch.tensor(0)) - max_num = _size_helper(g, bboxes, dim) - max_output_per_class = max_num - iou_threshold = g.op( - 'Constant', - value_t=torch.tensor([iou_threshold], dtype=torch.float)) - score_threshold = g.op( - 'Constant', - value_t=torch.tensor([score_threshold], dtype=torch.float)) - nms_out = g.op('NonMaxSuppression', boxes, scores, - max_output_per_class, iou_threshold, - score_threshold) - return squeeze( - g, - select( - g, nms_out, 1, - g.op( - 'Constant', - value_t=torch.tensor([2], dtype=torch.long))), 1) - - -class SoftNMSop(torch.autograd.Function): - - @staticmethod - def forward(ctx, boxes, scores, iou_threshold, sigma, min_score, method, - offset): - dets = boxes.new_empty((boxes.size(0), 5), device='cpu') - inds = ext_module.softnms( - boxes.cpu(), - scores.cpu(), - dets.cpu(), - iou_threshold=float(iou_threshold), - sigma=float(sigma), - min_score=float(min_score), - method=int(method), - offset=int(offset)) - return dets, inds - - @staticmethod - def symbolic(g, boxes, scores, iou_threshold, sigma, min_score, method, - offset): - from packaging import version - assert version.parse(torch.__version__) >= version.parse('1.7.0') - nms_out = g.op( - 'mmcv::SoftNonMaxSuppression', - boxes, - scores, - iou_threshold_f=float(iou_threshold), - sigma_f=float(sigma), - min_score_f=float(min_score), - method_i=int(method), - offset_i=int(offset), - outputs=2) - return nms_out - - -@deprecated_api_warning({'iou_thr': 'iou_threshold'}) -def nms(boxes, scores, iou_threshold, offset=0, score_threshold=0, max_num=-1): - """Dispatch to either CPU or GPU NMS implementations. - - The input can be either torch tensor or numpy array. GPU NMS will be used - if the input is gpu tensor, otherwise CPU NMS - will be used. The returned type will always be the same as inputs. - - Arguments: - boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4). - scores (torch.Tensor or np.ndarray): scores in shape (N, ). - iou_threshold (float): IoU threshold for NMS. - offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset). - score_threshold (float): score threshold for NMS. - max_num (int): maximum number of boxes after NMS. - - Returns: - tuple: kept dets(boxes and scores) and indice, which is always the \ - same data type as the input. 
- - Example: - >>> boxes = np.array([[49.1, 32.4, 51.0, 35.9], - >>> [49.3, 32.9, 51.0, 35.3], - >>> [49.2, 31.8, 51.0, 35.4], - >>> [35.1, 11.5, 39.1, 15.7], - >>> [35.6, 11.8, 39.3, 14.2], - >>> [35.3, 11.5, 39.9, 14.5], - >>> [35.2, 11.7, 39.7, 15.7]], dtype=np.float32) - >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.5, 0.4, 0.3],\ - dtype=np.float32) - >>> iou_threshold = 0.6 - >>> dets, inds = nms(boxes, scores, iou_threshold) - >>> assert len(inds) == len(dets) == 3 - """ - assert isinstance(boxes, (torch.Tensor, np.ndarray)) - assert isinstance(scores, (torch.Tensor, np.ndarray)) - is_numpy = False - if isinstance(boxes, np.ndarray): - is_numpy = True - boxes = torch.from_numpy(boxes) - if isinstance(scores, np.ndarray): - scores = torch.from_numpy(scores) - assert boxes.size(1) == 4 - assert boxes.size(0) == scores.size(0) - assert offset in (0, 1) - - if torch.__version__ == 'parrots': - indata_list = [boxes, scores] - indata_dict = { - 'iou_threshold': float(iou_threshold), - 'offset': int(offset) - } - inds = ext_module.nms(*indata_list, **indata_dict) - else: - inds = NMSop.apply(boxes, scores, iou_threshold, offset, - score_threshold, max_num) - dets = torch.cat((boxes[inds], scores[inds].reshape(-1, 1)), dim=1) - if is_numpy: - dets = dets.cpu().numpy() - inds = inds.cpu().numpy() - return dets, inds - - -@deprecated_api_warning({'iou_thr': 'iou_threshold'}) -def soft_nms(boxes, - scores, - iou_threshold=0.3, - sigma=0.5, - min_score=1e-3, - method='linear', - offset=0): - """Dispatch to only CPU Soft NMS implementations. - - The input can be either a torch tensor or numpy array. - The returned type will always be the same as inputs. - - Arguments: - boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4). - scores (torch.Tensor or np.ndarray): scores in shape (N, ). - iou_threshold (float): IoU threshold for NMS. - sigma (float): hyperparameter for gaussian method - min_score (float): score filter threshold - method (str): either 'linear' or 'gaussian' - offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset). - - Returns: - tuple: kept dets(boxes and scores) and indice, which is always the \ - same data type as the input. 
- - Example: - >>> boxes = np.array([[4., 3., 5., 3.], - >>> [4., 3., 5., 4.], - >>> [3., 1., 3., 1.], - >>> [3., 1., 3., 1.], - >>> [3., 1., 3., 1.], - >>> [3., 1., 3., 1.]], dtype=np.float32) - >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.4, 0.0], dtype=np.float32) - >>> iou_threshold = 0.6 - >>> dets, inds = soft_nms(boxes, scores, iou_threshold, sigma=0.5) - >>> assert len(inds) == len(dets) == 5 - """ - - assert isinstance(boxes, (torch.Tensor, np.ndarray)) - assert isinstance(scores, (torch.Tensor, np.ndarray)) - is_numpy = False - if isinstance(boxes, np.ndarray): - is_numpy = True - boxes = torch.from_numpy(boxes) - if isinstance(scores, np.ndarray): - scores = torch.from_numpy(scores) - assert boxes.size(1) == 4 - assert boxes.size(0) == scores.size(0) - assert offset in (0, 1) - method_dict = {'naive': 0, 'linear': 1, 'gaussian': 2} - assert method in method_dict.keys() - - if torch.__version__ == 'parrots': - dets = boxes.new_empty((boxes.size(0), 5), device='cpu') - indata_list = [boxes.cpu(), scores.cpu(), dets.cpu()] - indata_dict = { - 'iou_threshold': float(iou_threshold), - 'sigma': float(sigma), - 'min_score': min_score, - 'method': method_dict[method], - 'offset': int(offset) - } - inds = ext_module.softnms(*indata_list, **indata_dict) - else: - dets, inds = SoftNMSop.apply(boxes.cpu(), scores.cpu(), - float(iou_threshold), float(sigma), - float(min_score), method_dict[method], - int(offset)) - - dets = dets[:inds.size(0)] - - if is_numpy: - dets = dets.cpu().numpy() - inds = inds.cpu().numpy() - return dets, inds - else: - return dets.to(device=boxes.device), inds.to(device=boxes.device) - - -def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False): - """Performs non-maximum suppression in a batched fashion. - - Modified from https://github.com/pytorch/vision/blob - /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39. - In order to perform NMS independently per class, we add an offset to all - the boxes. The offset is dependent only on the class idx, and is large - enough so that boxes from different classes do not overlap. - - Arguments: - boxes (torch.Tensor): boxes in shape (N, 4). - scores (torch.Tensor): scores in shape (N, ). - idxs (torch.Tensor): each index value correspond to a bbox cluster, - and NMS will not be applied between elements of different idxs, - shape (N, ). - nms_cfg (dict): specify nms type and other parameters like iou_thr. - Possible keys includes the following. - - - iou_thr (float): IoU threshold used for NMS. - - split_thr (float): threshold number of boxes. In some cases the - number of boxes is large (e.g., 200k). To avoid OOM during - training, the users could set `split_thr` to a small value. - If the number of boxes is greater than the threshold, it will - perform NMS on each group of boxes separately and sequentially. - Defaults to 10000. - class_agnostic (bool): if true, nms is class agnostic, - i.e. IoU thresholding happens over all boxes, - regardless of the predicted class. - - Returns: - tuple: kept dets and indice. 
- """ - nms_cfg_ = nms_cfg.copy() - class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic) - if class_agnostic: - boxes_for_nms = boxes - else: - max_coordinate = boxes.max() - offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes)) - boxes_for_nms = boxes + offsets[:, None] - - nms_type = nms_cfg_.pop('type', 'nms') - nms_op = eval(nms_type) - - split_thr = nms_cfg_.pop('split_thr', 10000) - # Won't split to multiple nms nodes when exporting to onnx - if boxes_for_nms.shape[0] < split_thr or torch.onnx.is_in_onnx_export(): - dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_) - boxes = boxes[keep] - # -1 indexing works abnormal in TensorRT - # This assumes `dets` has 5 dimensions where - # the last dimension is score. - # TODO: more elegant way to handle the dimension issue. - # Some type of nms would reweight the score, such as SoftNMS - scores = dets[:, 4] - else: - max_num = nms_cfg_.pop('max_num', -1) - total_mask = scores.new_zeros(scores.size(), dtype=torch.bool) - # Some type of nms would reweight the score, such as SoftNMS - scores_after_nms = scores.new_zeros(scores.size()) - for id in torch.unique(idxs): - mask = (idxs == id).nonzero(as_tuple=False).view(-1) - dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_) - total_mask[mask[keep]] = True - scores_after_nms[mask[keep]] = dets[:, -1] - keep = total_mask.nonzero(as_tuple=False).view(-1) - - scores, inds = scores_after_nms[keep].sort(descending=True) - keep = keep[inds] - boxes = boxes[keep] - - if max_num > 0: - keep = keep[:max_num] - boxes = boxes[:max_num] - scores = scores[:max_num] - - return torch.cat([boxes, scores[:, None]], -1), keep - - -def nms_match(dets, iou_threshold): - """Matched dets into different groups by NMS. - - NMS match is Similar to NMS but when a bbox is suppressed, nms match will - record the indice of suppressed bbox and form a group with the indice of - kept bbox. In each group, indice is sorted as score order. - - Arguments: - dets (torch.Tensor | np.ndarray): Det boxes with scores, shape (N, 5). - iou_thr (float): IoU thresh for NMS. - - Returns: - List[torch.Tensor | np.ndarray]: The outer list corresponds different - matched group, the inner Tensor corresponds the indices for a group - in score order. - """ - if dets.shape[0] == 0: - matched = [] - else: - assert dets.shape[-1] == 5, 'inputs dets.shape should be (N, 5), ' \ - f'but get {dets.shape}' - if isinstance(dets, torch.Tensor): - dets_t = dets.detach().cpu() - else: - dets_t = torch.from_numpy(dets) - indata_list = [dets_t] - indata_dict = {'iou_threshold': float(iou_threshold)} - matched = ext_module.nms_match(*indata_list, **indata_dict) - if torch.__version__ == 'parrots': - matched = matched.tolist() - - if isinstance(dets, torch.Tensor): - return [dets.new_tensor(m, dtype=torch.long) for m in matched] - else: - return [np.array(m, dtype=np.int) for m in matched] - - -def nms_rotated(dets, scores, iou_threshold, labels=None): - """Performs non-maximum suppression (NMS) on the rotated boxes according to - their intersection-over-union (IoU). - - Rotated NMS iteratively removes lower scoring rotated boxes which have an - IoU greater than iou_threshold with another (higher scoring) rotated box. - - Args: - boxes (Tensor): Rotated boxes in shape (N, 5). They are expected to \ - be in (x_ctr, y_ctr, width, height, angle_radian) format. - scores (Tensor): scores in shape (N, ). - iou_threshold (float): IoU thresh for NMS. - labels (Tensor): boxes' label in shape (N,). 
- - Returns: - tuple: kept dets(boxes and scores) and indice, which is always the \ - same data type as the input. - """ - if dets.shape[0] == 0: - return dets, None - multi_label = labels is not None - if multi_label: - dets_wl = torch.cat((dets, labels.unsqueeze(1)), 1) - else: - dets_wl = dets - _, order = scores.sort(0, descending=True) - dets_sorted = dets_wl.index_select(0, order) - - if torch.__version__ == 'parrots': - keep_inds = ext_module.nms_rotated( - dets_wl, - scores, - order, - dets_sorted, - iou_threshold=iou_threshold, - multi_label=multi_label) - else: - keep_inds = ext_module.nms_rotated(dets_wl, scores, order, dets_sorted, - iou_threshold, multi_label) - dets = torch.cat((dets[keep_inds], scores[keep_inds].reshape(-1, 1)), - dim=1) - return dets, keep_inds diff --git a/spaces/SerdarHelli/diffusion-point-cloud/app.py b/spaces/SerdarHelli/diffusion-point-cloud/app.py deleted file mode 100644 index 5884005e4b227aaa6f07c681ab27fe06e24786da..0000000000000000000000000000000000000000 --- a/spaces/SerdarHelli/diffusion-point-cloud/app.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -import gradio as gr -import plotly.graph_objects as go -import sys -import torch -from huggingface_hub import hf_hub_download -import numpy as np -import random - -os.system("git clone https://github.com/luost26/diffusion-point-cloud") -sys.path.append("diffusion-point-cloud") - -#Codes reference : https://github.com/luost26/diffusion-point-cloud - -from models.vae_gaussian import * -from models.vae_flow import * - -airplane=hf_hub_download("SerdarHelli/diffusion-point-cloud", filename="GEN_airplane.pt",revision="main") -chair="./GEN_chair.pt" - -device='cuda' if torch.cuda.is_available() else 'cpu' - -ckpt_airplane = torch.load(airplane,map_location=torch.device(device)) -ckpt_chair = torch.load(chair,map_location=torch.device(device)) - -def seed_all(seed): - torch.manual_seed(seed) - np.random.seed(seed) - random.seed(seed) - -def normalize_point_clouds(pcs,mode): - if mode is None: - return pcs - for i in range(pcs.size(0)): - pc = pcs[i] - if mode == 'shape_unit': - shift = pc.mean(dim=0).reshape(1, 3) - scale = pc.flatten().std().reshape(1, 1) - elif mode == 'shape_bbox': - pc_max, _ = pc.max(dim=0, keepdim=True) # (1, 3) - pc_min, _ = pc.min(dim=0, keepdim=True) # (1, 3) - shift = ((pc_min + pc_max) / 2).view(1, 3) - scale = (pc_max - pc_min).max().reshape(1, 1) / 2 - pc = (pc - shift) / scale - pcs[i] = pc - return pcs - - - - -def predict(Seed,ckpt): - if Seed==None: - Seed=777 - seed_all(Seed) - - if ckpt['args'].model == 'gaussian': - model = GaussianVAE(ckpt['args']).to(device) - elif ckpt['args'].model == 'flow': - model = FlowVAE(ckpt['args']).to(device) - - model.load_state_dict(ckpt['state_dict']) - # Generate Point Clouds - gen_pcs = [] - with torch.no_grad(): - z = torch.randn([1, ckpt['args'].latent_dim]).to(device) - x = model.sample(z, 2048, flexibility=ckpt['args'].flexibility) - gen_pcs.append(x.detach().cpu()) - gen_pcs = torch.cat(gen_pcs, dim=0)[:1] - gen_pcs = normalize_point_clouds(gen_pcs, mode="shape_bbox") - - return gen_pcs[0] - -def generate(seed,value): - if value=="Airplane": - ckpt=ckpt_airplane - elif value=="Chair": - ckpt=ckpt_chair - else : - ckpt=ckpt_airplane - - colors=(238, 75, 43) - points=predict(seed,ckpt) - num_points=points.shape[0] - - - fig = go.Figure( - data=[ - go.Scatter3d( - x=points[:,0], y=points[:,1], z=points[:,2], - mode='markers', - marker=dict(size=1, color=colors) - ) - ], - layout=dict( - scene=dict( - xaxis=dict(visible=False), - 
yaxis=dict(visible=False), - zaxis=dict(visible=False) - ) - ) - ) - return fig - -markdown=f''' - # Diffusion Probabilistic Models for 3D Point Cloud Generation - - - [The space demo for the CVPR 2021 paper "Diffusion Probabilistic Models for 3D Point Cloud Generation".](https://arxiv.org/abs/2103.01458) - - [For the official implementation.](https://github.com/luost26/diffusion-point-cloud) - - ### Future Work based on interest - - Adding new models for new type objects - - New Customization - - - - It is running on {device} - - -''' -with gr.Blocks() as demo: - with gr.Column(): - with gr.Row(): - gr.Markdown(markdown) - with gr.Row(): - seed = gr.Slider( minimum=0, maximum=2**16,label='Seed') - value=gr.Dropdown(choices=["Airplane","Chair"],label="Choose Model Type") - #truncate_std = gr.Slider( minimum=1, maximum=2,label='Truncate Std') - - btn = gr.Button(value="Generate") - point_cloud = gr.Plot() - demo.load(generate, [seed,value], point_cloud) - btn.click(generate, [seed,value], point_cloud) - -demo.launch() \ No newline at end of file diff --git a/spaces/Shakeb100/GroomingGenie_AI/clipseg/metrics.py b/spaces/Shakeb100/GroomingGenie_AI/clipseg/metrics.py deleted file mode 100644 index 35d887b61bfa583a8852c80ff164919be7b45f4e..0000000000000000000000000000000000000000 --- a/spaces/Shakeb100/GroomingGenie_AI/clipseg/metrics.py +++ /dev/null @@ -1,271 +0,0 @@ -from torch.functional import Tensor -from general_utils import log -from collections import defaultdict -import numpy as np - -import torch -from torch.nn import functional as nnf - - -class BaseMetric(object): - - def __init__(self, metric_names, pred_range=None, gt_index=0, pred_index=0, eval_intermediate=True, - eval_validation=True): - self._names = tuple(metric_names) - self._eval_intermediate = eval_intermediate - self._eval_validation = eval_validation - - self._pred_range = pred_range - self._pred_index = pred_index - self._gt_index = gt_index - - self.predictions = [] - self.ground_truths = [] - - def eval_intermediate(self): - return self._eval_intermediate - - def eval_validation(self): - return self._eval_validation - - def names(self): - return self._names - - def add(self, predictions, ground_truth): - raise NotImplementedError - - def value(self): - raise NotImplementedError - - def scores(self): - # similar to value but returns dict - value = self.value() - if type(value) == dict: - return value - else: - assert type(value) in {list, tuple} - return list(zip(self.names(), self.value())) - - def _get_pred_gt(self, predictions, ground_truth): - pred = predictions[self._pred_index] - gt = ground_truth[self._gt_index] - - if self._pred_range is not None: - pred = pred[:, self._pred_range[0]: self._pred_range[1]] - - return pred, gt - - -class FixedIntervalMetrics(BaseMetric): - - def __init__(self, sigmoid=False, ignore_mask=False, resize_to=None, - resize_pred=None, n_values=51, custom_threshold=None): - - - super().__init__(('ap', 'best_fgiou', 'best_miou', 'fgiou0.5', 'fgiou0.1', 'mean_iou_0p5', 'mean_iou_0p1', 'best_biniou', 'biniou_0.5', 'fgiou_thresh')) - self.intersections = [] - self.unions = [] - # self.threshold = threshold - self.sigmoid = sigmoid - self.resize_to = resize_to - self.resize_pred = resize_pred # resize prediction to match ground truth - self.class_count = defaultdict(lambda: 0) - self.per_class = defaultdict(lambda : [0,0]) - self.ignore_mask = ignore_mask - self.custom_threshold = custom_threshold - - self.scores_ap = [] - self.scores_iou = [] - self.gts, self.preds = [], [] - self.classes 
= [] - - # [1:-1] ignores 0 and 1 - self.threshold_values = np.linspace(0, 1, n_values)[1:-1] - - self.metrics = dict(tp=[], fp=[], fn=[], tn=[]) - - def add(self, pred, gt): - - pred_batch = pred[0].cpu() - - if self.sigmoid: - pred_batch = torch.sigmoid(pred_batch) - - gt_batch = gt[0].cpu() - mask_batch = gt[1] if len(gt) > 1 and not self.ignore_mask and gt[1].numel() > 0 else ([None] * len(pred_batch)) - cls_batch = gt[2] if len(gt) > 2 else [None] * len(pred_batch) - - if self.resize_to is not None: - gt_batch = nnf.interpolate(gt_batch, self.resize_to, mode='nearest') - pred_batch = nnf.interpolate(pred_batch, self.resize_to, mode='bilinear', align_corners=False) - - if isinstance(cls_batch, torch.Tensor): - cls_batch = cls_batch.cpu().numpy().tolist() - - assert len(gt_batch) == len(pred_batch) == len(cls_batch), f'{len(gt_batch)} {len(pred_batch)} {len(cls_batch)}' - - for predictions, ground_truth, mask, cls in zip(pred_batch, gt_batch, mask_batch, cls_batch): - - if self.resize_pred: - predictions = nnf.interpolate(predictions.unsqueeze(0).float(), size=ground_truth.size()[-2:], mode='bilinear', align_corners=True) - - p = predictions.flatten() - g = ground_truth.flatten() - - assert len(p) == len(g) - - if mask is not None: - m = mask.flatten().bool() - p = p[m] - g = g[m] - - p_sorted = p.sort() - p = p_sorted.values - g = g[p_sorted.indices] - - tps, fps, fns, tns = [], [], [], [] - for thresh in self.threshold_values: - - valid = torch.where(p > thresh)[0] - if len(valid) > 0: - n = int(valid[0]) - else: - n = len(g) - - fn = int(g[:n].sum()) - tp = int(g[n:].sum()) - fns += [fn] - tns += [n - fn] - tps += [tp] - fps += [len(g) - n - tp] - - self.metrics['tp'] += [tps] - self.metrics['fp'] += [fps] - self.metrics['fn'] += [fns] - self.metrics['tn'] += [tns] - - self.classes += [cls.item() if isinstance(cls, torch.Tensor) else cls] - - def value(self): - - import time - t_start = time.time() - - if set(self.classes) == set([None]): - all_classes = None - log.warning('classes were not provided, cannot compute mIoU') - else: - all_classes = set(int(c) for c in self.classes) - # log.info(f'compute metrics for {len(all_classes)} classes') - - summed = {k: [sum([self.metrics[k][i][j] - for i in range(len(self.metrics[k]))]) - for j in range(len(self.threshold_values))] - for k in self.metrics.keys()} - - if all_classes is not None: - - assert len(self.classes) == len(self.metrics['tp']) == len(self.metrics['fn']) - # group by class - metrics_by_class = {c: {k: [] for k in self.metrics.keys()} for c in all_classes} - for i in range(len(self.metrics['tp'])): - for k in self.metrics.keys(): - metrics_by_class[self.classes[i]][k] += [self.metrics[k][i]] - - # sum over all instances within the classes - summed_by_cls = {k: {c: np.array(metrics_by_class[c][k]).sum(0).tolist() for c in all_classes} for k in self.metrics.keys()} - - - # Compute average precision - - assert (np.array(summed['fp']) + np.array(summed['tp']) ).sum(), 'no predictions is made' - - # only consider values where a prediction is made - precisions = [summed['tp'][j] / (1 + summed['tp'][j] + summed['fp'][j]) for j in range(len(self.threshold_values)) - if summed['tp'][j] + summed['fp'][j] > 0] - recalls = [summed['tp'][j] / (1 + summed['tp'][j] + summed['fn'][j]) for j in range(len(self.threshold_values)) - if summed['tp'][j] + summed['fp'][j] > 0] - - # remove duplicate recall-precision-pairs (and sort by recall value) - recalls, precisions = zip(*sorted(list(set(zip(recalls, precisions))), key=lambda x: x[0])) - - 
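-        # Integrate precision over recall (Simpson's rule) to obtain the average precision.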
from scipy.integrate import simps - ap = simps(precisions, recalls) - - # Compute best IoU - fgiou_scores = [summed['tp'][j] / (1 + summed['tp'][j] + summed['fp'][j] + summed['fn'][j]) for j in range(len(self.threshold_values))] - - biniou_scores = [ - 0.5*(summed['tp'][j] / (1 + summed['tp'][j] + summed['fp'][j] + summed['fn'][j])) + - 0.5*(summed['tn'][j] / (1 + summed['tn'][j] + summed['fn'][j] + summed['fp'][j])) - for j in range(len(self.threshold_values)) - ] - - index_0p5 = self.threshold_values.tolist().index(0.5) - index_0p1 = self.threshold_values.tolist().index(0.1) - index_0p2 = self.threshold_values.tolist().index(0.2) - index_0p3 = self.threshold_values.tolist().index(0.3) - - if self.custom_threshold is not None: - index_ct = self.threshold_values.tolist().index(self.custom_threshold) - - if all_classes is not None: - # mean IoU - mean_ious = [np.mean([summed_by_cls['tp'][c][j] / (1 + summed_by_cls['tp'][c][j] + summed_by_cls['fp'][c][j] + summed_by_cls['fn'][c][j]) - for c in all_classes]) - for j in range(len(self.threshold_values))] - - mean_iou_dict = { - 'miou_best': max(mean_ious) if all_classes is not None else None, - 'miou_0.5': mean_ious[index_0p5] if all_classes is not None else None, - 'miou_0.1': mean_ious[index_0p1] if all_classes is not None else None, - 'miou_0.2': mean_ious[index_0p2] if all_classes is not None else None, - 'miou_0.3': mean_ious[index_0p3] if all_classes is not None else None, - 'miou_best_t': self.threshold_values[np.argmax(mean_ious)], - 'mean_iou_ct': mean_ious[index_ct] if all_classes is not None and self.custom_threshold is not None else None, - 'mean_iou_scores': mean_ious, - } - - print(f'metric computation on {(len(all_classes) if all_classes is not None else "no")} classes took {time.time() - t_start:.1f}s') - - return { - 'ap': ap, - - # fgiou - 'fgiou_best': max(fgiou_scores), - 'fgiou_0.5': fgiou_scores[index_0p5], - 'fgiou_0.1': fgiou_scores[index_0p1], - 'fgiou_0.2': fgiou_scores[index_0p2], - 'fgiou_0.3': fgiou_scores[index_0p3], - 'fgiou_best_t': self.threshold_values[np.argmax(fgiou_scores)], - - # mean iou - - - # biniou - 'biniou_best': max(biniou_scores), - 'biniou_0.5': biniou_scores[index_0p5], - 'biniou_0.1': biniou_scores[index_0p1], - 'biniou_0.2': biniou_scores[index_0p2], - 'biniou_0.3': biniou_scores[index_0p3], - 'biniou_best_t': self.threshold_values[np.argmax(biniou_scores)], - - # custom threshold - 'fgiou_ct': fgiou_scores[index_ct] if self.custom_threshold is not None else None, - 'biniou_ct': biniou_scores[index_ct] if self.custom_threshold is not None else None, - 'ct': self.custom_threshold, - - # statistics - 'fgiou_scores': fgiou_scores, - 'biniou_scores': biniou_scores, - 'precision_recall_curve': sorted(list(set(zip(recalls, precisions)))), - 'summed_statistics': summed, - 'summed_by_cls_statistics': summed_by_cls, - - **mean_iou_dict - } - - # ('ap', 'best_fgiou', 'best_miou', 'fgiou0.5', 'fgiou0.1', 'mean_iou_0p5', 'mean_iou_0p1', 'best_biniou', 'biniou_0.5', 'fgiou_thresh' - - # return ap, best_fgiou, best_mean_iou, iou_0p5, iou_0p1, mean_iou_0p5, mean_iou_0p1, best_biniou, biniou0p5, best_fgiou_thresh, {'summed': summed, 'summed_by_cls': summed_by_cls} - diff --git a/spaces/Shredder/CONBERT-2/sus_fls.py b/spaces/Shredder/CONBERT-2/sus_fls.py deleted file mode 100644 index e96419cd67d3b7c486d60e891a61f92cd58af42a..0000000000000000000000000000000000000000 --- a/spaces/Shredder/CONBERT-2/sus_fls.py +++ /dev/null @@ -1,52 +0,0 @@ -from transformers import RobertaTokenizer,pipeline -import torch 
-import nltk
-from nltk.tokenize import sent_tokenize
-from fin_readability_sustainability import BERTClass, do_predict
-import pandas as pd
-import en_core_web_sm
-
-nltk.download('punkt')
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# SUSTAINABILITY STARTS
-tokenizer_sus = RobertaTokenizer.from_pretrained('roberta-base')
-model_sustain = BERTClass(2, "sustainability")
-model_sustain.to(device)
-model_sustain.load_state_dict(torch.load('sustainability_model.bin', map_location=device)['model_state_dict'])
-
-def get_sustainability(text):
-    df = pd.DataFrame({'sentence': sent_tokenize(text)})
-    actual_predictions_sustainability = do_predict(model_sustain, tokenizer_sus, df)
-    highlight = []
-    # model-specific score cut-offs: high scores are flagged as non-sustainable,
-    # low scores as sustainable, anything in between is left unlabelled
-    for sent, prob in zip(df['sentence'].values, actual_predictions_sustainability[1]):
-        if prob >= 4.384316:
-            highlight.append((sent, 'non-sustainable'))
-        elif prob <= 1.423736:
-            highlight.append((sent, 'sustainable'))
-        else:
-            highlight.append((sent, '-'))
-    return highlight
-# SUSTAINABILITY ENDS
-
-
-## Forward Looking Statement
-nlp = en_core_web_sm.load()
-def split_in_sentences(text):
-    doc = nlp(text)
-    return [str(sent).strip() for sent in doc.sents]
-def make_spans(text, results):
-    results_list = [results[i]['label'] for i in range(len(results))]
-    facts_spans = list(zip(split_in_sentences(text), results_list))
-    return facts_spans
-
-fls_model = pipeline("text-classification", model="yiyanghkust/finbert-fls", tokenizer="yiyanghkust/finbert-fls")
-def fls(text):
-    results = fls_model(split_in_sentences(text))
-    return make_spans(text, results)
-
-
-
\ No newline at end of file
diff --git a/spaces/Soumahara/hakurei-waifu-diffusion/README.md b/spaces/Soumahara/hakurei-waifu-diffusion/README.md
deleted file mode 100644
index 62ece8b2448738716eccff5c88613751424ec805..0000000000000000000000000000000000000000
--- a/spaces/Soumahara/hakurei-waifu-diffusion/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Hakurei Waifu Diffusion
-emoji: 🦀
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 3.20.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SpacesExamples/InvokeAI/Dockerfile b/spaces/SpacesExamples/InvokeAI/Dockerfile
deleted file mode 100644
index 5e2f3c166f32bd80849b9d71ef24b19daaf588e3..0000000000000000000000000000000000000000
--- a/spaces/SpacesExamples/InvokeAI/Dockerfile
+++ /dev/null
@@ -1,55 +0,0 @@
-FROM nvidia/cuda:12.0.0-cudnn8-devel-ubuntu22.04
-
-ENV DEBIAN_FRONTEND=noninteractive \
-    TZ=America/Los_Angeles
-
-ARG USE_PERSISTENT_DATA
-
-RUN apt-get update && apt-get install -y \
-    git \
-    make build-essential libssl-dev zlib1g-dev \
-    libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \
-    libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev git-lfs \
-    ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx \
-    && rm -rf /var/lib/apt/lists/* \
-    && git lfs install
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-
-# User
-RUN useradd -m -u 1000 user
-USER user
-ENV HOME=/home/user \
-    PATH=/home/user/.local/bin:$PATH
-
-# Pyenv
-RUN curl https://pyenv.run | bash
-ENV PATH=$HOME/.pyenv/shims:$HOME/.pyenv/bin:$PATH
-
-ARG PYTHON_VERSION=3.9.17
-# Python
-RUN pyenv install $PYTHON_VERSION && \
-    pyenv global $PYTHON_VERSION && \
-    pyenv rehash && \
-    pip install --no-cache-dir --upgrade pip setuptools wheel && \
-    pip install --no-cache-dir \
-    datasets \
-    huggingface-hub "protobuf<4" "click<8.1"
-
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-# Set the working directory to $HOME/app (the USE_PERSISTENT_DATA switch declared above is not wired up here)
-WORKDIR $HOME/app
-EXPOSE 9090
-
-COPY --chown=user:user ./invokeai.yaml $HOME/app/invokeai.yaml
-
-RUN ["invokeai-configure", "--yes", "--default_only", "--root", "."]
-
-# Install models according to https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/050_INSTALLING_MODELS.md
-# RUN ["invokeai-model-install", "--add","https://civitai.com/api/download/models/128713"]
-
-# Run the InvokeAI web UI
-CMD ["invokeai-web", "--host", "0.0.0.0", "--no-internet_available"]
diff --git a/spaces/Sriharsha6902/Chat-Analyser/preprocessor.py b/spaces/Sriharsha6902/Chat-Analyser/preprocessor.py
deleted file mode 100644
index 28340f05d7232c2f34ec6eec5cb197334a4b0df5..0000000000000000000000000000000000000000
--- a/spaces/Sriharsha6902/Chat-Analyser/preprocessor.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Importing modules
-import re
-import pandas as pd
-
-# To convert exported chat text into a data frame in the desired form
-def preprocess(data):
-
-    # Regular expression matching the "dd/mm/yy, hh:mm - " message prefix
-    # (raw string avoids invalid escape-sequence warnings)
-    pattern = r'\d{1,2}/\d{1,2}/\d{2,4},\s\d{1,2}:\d{2}\s-\s'
-
-    # Split text file into messages & dates based on pattern
-    messages = re.split(pattern, data)[1:]
-    dates = re.findall(pattern, data)
-
-    # Creating data frame
-    df = pd.DataFrame({'user_message': messages, 'message_date': dates})
-
-    # convert dates type; fall back to month-first format if day-first parsing fails
-    try:
-        df['message_date'] = pd.to_datetime(df['message_date'], format='%d/%m/%y, %H:%M - ')
-    except ValueError:
-        df['message_date'] = pd.to_datetime(df['message_date'], format='%m/%d/%y, %H:%M - ')
-    df.rename(columns={'message_date': 'date'}, inplace=True)
-
-    users = []
-    messages = []
-    for message in df['user_message']:  # For each message in user_message
-
-        # Split message based on r'([\w\W]+?):\s'
-        entry = re.split(r'([\w\W]+?):\s', message)
-        if entry[1:]:
-            # User name
-            users.append(entry[1])
-            # Only message
-            messages.append(" ".join(entry[2:]))
-        else:
-            # Adding group notifications
-            users.append('group_notification')
-
-            # Null value
-            messages.append(entry[0])
-
-    # Creating new columns
-    df['user'] = users
-    df['message'] = messages
-
-    # Remove columns of no use
-    df.drop(columns=['user_message'], inplace=True)
-
-    # Extract date
-    df['only_date'] = df['date'].dt.date
-
-    # Extract year
-    df['year'] = df['date'].dt.year
-
-    # Extract month
-    df['month_num'] = df['date'].dt.month
-
-    # Extract month name
-    df['month'] = df['date'].dt.month_name()
-
-    # Extract day
-    df['day'] = df['date'].dt.day
-
-    # Extract day name
-    df['day_name'] = df['date'].dt.day_name()
-
-    # Extract hour
-    df['hour'] = df['date'].dt.hour
-
-    # Extract minute
-    df['minute'] = df['date'].dt.minute
-
-    # Remove entries having user as group_notification
-    df = df[df['user'] != 'group_notification']
-    return df
diff --git a/spaces/Subbu-2004/MyNewAiAvatar/app.py b/spaces/Subbu-2004/MyNewAiAvatar/app.py
deleted file mode 100644
index 4e068787018cc7b1905cafaf008c2d67324ff0c7..0000000000000000000000000000000000000000
--- a/spaces/Subbu-2004/MyNewAiAvatar/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import os
-import gradio as gr
-from langchain.chat_models import ChatOpenAI
-from langchain import LLMChain, PromptTemplate
-from langchain.memory import ConversationBufferMemory
-
-OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
-
-template = """Meet Subbu, your youthful and witty personal assistant! At 19 years old, he's full of energy and always eager to help.
Subbu's goal is to assist you with any questions or problems you might have. His enthusiasm shines through in every response, making interactions with him enjoyable and engaging.
-{chat_history}
-User: {user_message}
-Chatbot:"""
-
-prompt = PromptTemplate(
-    input_variables=["chat_history", "user_message"], template=template
-)
-
-memory = ConversationBufferMemory(memory_key="chat_history")
-
-llm_chain = LLMChain(
-    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),  # temperature should be a float, not a string
-    prompt=prompt,
-    verbose=True,
-    memory=memory,
-)
-
-def get_text_response(user_message, history):
-    response = llm_chain.predict(user_message=user_message)
-    return response
-
-demo = gr.ChatInterface(get_text_response)
-
-if __name__ == "__main__":
-    demo.launch()  # To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/charset_normalizer/models.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/charset_normalizer/models.py
deleted file mode 100644
index 7f8ca389050cd4bac7fd23d84e399a242d35d309..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/charset_normalizer/models.py
+++ /dev/null
@@ -1,337 +0,0 @@
-from encodings.aliases import aliases
-from hashlib import sha256
-from json import dumps
-from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
-
-from .constant import TOO_BIG_SEQUENCE
-from .utils import iana_name, is_multi_byte_encoding, unicode_range
-
-
-class CharsetMatch:
-    def __init__(
-        self,
-        payload: bytes,
-        guessed_encoding: str,
-        mean_mess_ratio: float,
-        has_sig_or_bom: bool,
-        languages: "CoherenceMatches",
-        decoded_payload: Optional[str] = None,
-    ):
-        self._payload: bytes = payload
-
-        self._encoding: str = guessed_encoding
-        self._mean_mess_ratio: float = mean_mess_ratio
-        self._languages: CoherenceMatches = languages
-        self._has_sig_or_bom: bool = has_sig_or_bom
-        self._unicode_ranges: Optional[List[str]] = None
-
-        self._leaves: List[CharsetMatch] = []
-        self._mean_coherence_ratio: float = 0.0
-
-        self._output_payload: Optional[bytes] = None
-        self._output_encoding: Optional[str] = None
-
-        self._string: Optional[str] = decoded_payload
-
-    def __eq__(self, other: object) -> bool:
-        if not isinstance(other, CharsetMatch):
-            raise TypeError(
-                "__eq__ cannot be invoked on {} and {}.".format(
-                    str(other.__class__), str(self.__class__)
-                )
-            )
-        return self.encoding == other.encoding and self.fingerprint == other.fingerprint
-
-    def __lt__(self, other: object) -> bool:
-        """
-        Implemented to make sorted available upon CharsetMatches items.
-        """
-        if not isinstance(other, CharsetMatch):
-            raise ValueError
-
-        chaos_difference: float = abs(self.chaos - other.chaos)
-        coherence_difference: float = abs(self.coherence - other.coherence)
-
-        # Below 1% difference --> Use Coherence
-        if chaos_difference < 0.01 and coherence_difference > 0.02:
-            # When having a tough decision, use the result that decoded as many multi-byte as possible.
-            if chaos_difference == 0.0 and self.coherence == other.coherence:
-                return self.multi_byte_usage > other.multi_byte_usage
-            return self.coherence > other.coherence
-
-        return self.chaos < other.chaos
-
-    @property
-    def multi_byte_usage(self) -> float:
-        return 1.0 - len(str(self)) / len(self.raw)
-
-    def __str__(self) -> str:
-        # Lazy Str Loading
-        if self._string is None:
-            self._string = str(self._payload, self._encoding, "strict")
-        return self._string
-
-    def __repr__(self) -> str:
-        return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
-
-    def add_submatch(self, other: "CharsetMatch") -> None:
-        if not isinstance(other, CharsetMatch) or other == self:
-            raise ValueError(
-                "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
-                    other.__class__
-                )
-            )
-
-        other._string = None  # Unload RAM usage; dirty trick.
-        self._leaves.append(other)
-
-    @property
-    def encoding(self) -> str:
-        return self._encoding
-
-    @property
-    def encoding_aliases(self) -> List[str]:
-        """
-        An encoding is known by many names. This can help when searching for IBM855 when it's listed as CP855.
-        """
-        also_known_as: List[str] = []
-        for u, p in aliases.items():
-            if self.encoding == u:
-                also_known_as.append(p)
-            elif self.encoding == p:
-                also_known_as.append(u)
-        return also_known_as
-
-    @property
-    def bom(self) -> bool:
-        return self._has_sig_or_bom
-
-    @property
-    def byte_order_mark(self) -> bool:
-        return self._has_sig_or_bom
-
-    @property
-    def languages(self) -> List[str]:
-        """
-        Return the complete list of possible languages found in decoded sequence.
-        Usually not really useful. The returned list may be empty even if the 'language' property returns something != 'Unknown'.
-        """
-        return [e[0] for e in self._languages]
-
-    @property
-    def language(self) -> str:
-        """
-        Most probable language found in decoded sequence. If none were detected or inferred, the property will return
-        "Unknown".
-        """
-        if not self._languages:
-            # Trying to infer the language based on the given encoding
-            # It's either English or we should not pronounce ourselves in certain cases.
-            if "ascii" in self.could_be_from_charset:
-                return "English"
-
-            # doing it there to avoid circular import
-            from charset_normalizer.cd import encoding_languages, mb_encoding_languages
-
-            languages = (
-                mb_encoding_languages(self.encoding)
-                if is_multi_byte_encoding(self.encoding)
-                else encoding_languages(self.encoding)
-            )
-
-            if len(languages) == 0 or "Latin Based" in languages:
-                return "Unknown"
-
-            return languages[0]
-
-        return self._languages[0][0]
-
-    @property
-    def chaos(self) -> float:
-        return self._mean_mess_ratio
-
-    @property
-    def coherence(self) -> float:
-        if not self._languages:
-            return 0.0
-        return self._languages[0][1]
-
-    @property
-    def percent_chaos(self) -> float:
-        return round(self.chaos * 100, ndigits=3)
-
-    @property
-    def percent_coherence(self) -> float:
-        return round(self.coherence * 100, ndigits=3)
-
-    @property
-    def raw(self) -> bytes:
-        """
-        Original untouched bytes.
- """ - return self._payload - - @property - def submatch(self) -> List["CharsetMatch"]: - return self._leaves - - @property - def has_submatch(self) -> bool: - return len(self._leaves) > 0 - - @property - def alphabets(self) -> List[str]: - if self._unicode_ranges is not None: - return self._unicode_ranges - # list detected ranges - detected_ranges: List[Optional[str]] = [ - unicode_range(char) for char in str(self) - ] - # filter and sort - self._unicode_ranges = sorted(list({r for r in detected_ranges if r})) - return self._unicode_ranges - - @property - def could_be_from_charset(self) -> List[str]: - """ - The complete list of encoding that output the exact SAME str result and therefore could be the originating - encoding. - This list does include the encoding available in property 'encoding'. - """ - return [self._encoding] + [m.encoding for m in self._leaves] - - def output(self, encoding: str = "utf_8") -> bytes: - """ - Method to get re-encoded bytes payload using given target encoding. Default to UTF-8. - Any errors will be simply ignored by the encoder NOT replaced. - """ - if self._output_encoding is None or self._output_encoding != encoding: - self._output_encoding = encoding - self._output_payload = str(self).encode(encoding, "replace") - - return self._output_payload # type: ignore - - @property - def fingerprint(self) -> str: - """ - Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one. - """ - return sha256(self.output()).hexdigest() - - -class CharsetMatches: - """ - Container with every CharsetMatch items ordered by default from most probable to the less one. - Act like a list(iterable) but does not implements all related methods. - """ - - def __init__(self, results: Optional[List[CharsetMatch]] = None): - self._results: List[CharsetMatch] = sorted(results) if results else [] - - def __iter__(self) -> Iterator[CharsetMatch]: - yield from self._results - - def __getitem__(self, item: Union[int, str]) -> CharsetMatch: - """ - Retrieve a single item either by its position or encoding name (alias may be used here). - Raise KeyError upon invalid index or encoding not present in results. - """ - if isinstance(item, int): - return self._results[item] - if isinstance(item, str): - item = iana_name(item, False) - for result in self._results: - if item in result.could_be_from_charset: - return result - raise KeyError - - def __len__(self) -> int: - return len(self._results) - - def __bool__(self) -> bool: - return len(self._results) > 0 - - def append(self, item: CharsetMatch) -> None: - """ - Insert a single match. Will be inserted accordingly to preserve sort. - Can be inserted as a submatch. - """ - if not isinstance(item, CharsetMatch): - raise ValueError( - "Cannot append instance '{}' to CharsetMatches".format( - str(item.__class__) - ) - ) - # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage) - if len(item.raw) <= TOO_BIG_SEQUENCE: - for match in self._results: - if match.fingerprint == item.fingerprint and match.chaos == item.chaos: - match.add_submatch(item) - return - self._results.append(item) - self._results = sorted(self._results) - - def best(self) -> Optional["CharsetMatch"]: - """ - Simply return the first match. Strict equivalent to matches[0]. - """ - if not self._results: - return None - return self._results[0] - - def first(self) -> Optional["CharsetMatch"]: - """ - Redundant method, call the method best(). Kept for BC reasons. 
- """ - return self.best() - - -CoherenceMatch = Tuple[str, float] -CoherenceMatches = List[CoherenceMatch] - - -class CliDetectionResult: - def __init__( - self, - path: str, - encoding: Optional[str], - encoding_aliases: List[str], - alternative_encodings: List[str], - language: str, - alphabets: List[str], - has_sig_or_bom: bool, - chaos: float, - coherence: float, - unicode_path: Optional[str], - is_preferred: bool, - ): - self.path: str = path - self.unicode_path: Optional[str] = unicode_path - self.encoding: Optional[str] = encoding - self.encoding_aliases: List[str] = encoding_aliases - self.alternative_encodings: List[str] = alternative_encodings - self.language: str = language - self.alphabets: List[str] = alphabets - self.has_sig_or_bom: bool = has_sig_or_bom - self.chaos: float = chaos - self.coherence: float = coherence - self.is_preferred: bool = is_preferred - - @property - def __dict__(self) -> Dict[str, Any]: # type: ignore - return { - "path": self.path, - "encoding": self.encoding, - "encoding_aliases": self.encoding_aliases, - "alternative_encodings": self.alternative_encodings, - "language": self.language, - "alphabets": self.alphabets, - "has_sig_or_bom": self.has_sig_or_bom, - "chaos": self.chaos, - "coherence": self.coherence, - "unicode_path": self.unicode_path, - "is_preferred": self.is_preferred, - } - - def to_json(self) -> str: - return dumps(self.__dict__, ensure_ascii=True, indent=4) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_filtering.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_filtering.py deleted file mode 100644 index d5f0d5667581e0729f5b48bcb65a1e78e4e8688e..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_filtering.py +++ /dev/null @@ -1,333 +0,0 @@ -import fnmatch -import glob -import os.path -import sys - -from _pydev_bundle import pydev_log -import pydevd_file_utils -import json -from collections import namedtuple -from _pydev_bundle._pydev_saved_modules import threading -from pydevd_file_utils import normcase -from _pydevd_bundle.pydevd_constants import USER_CODE_BASENAMES_STARTING_WITH, \ - LIBRARY_CODE_BASENAMES_STARTING_WITH, IS_PYPY, IS_WINDOWS -from _pydevd_bundle import pydevd_constants - -ExcludeFilter = namedtuple('ExcludeFilter', 'name, exclude, is_path') - - -def _convert_to_str_and_clear_empty(roots): - new_roots = [] - for root in roots: - assert isinstance(root, str), '%s not str (found: %s)' % (root, type(root)) - if root: - new_roots.append(root) - return new_roots - - -def _check_matches(patterns, paths): - if not patterns and not paths: - # Matched to the end. - return True - - if (not patterns and paths) or (patterns and not paths): - return False - - pattern = normcase(patterns[0]) - path = normcase(paths[0]) - - if not glob.has_magic(pattern): - - if pattern != path: - return False - - elif pattern == '**': - if len(patterns) == 1: - return True # if ** is the last one it matches anything to the right. - - for i in range(len(paths)): - # Recursively check the remaining patterns as the - # current pattern could match any number of paths. - if _check_matches(patterns[1:], paths[i:]): - return True - - elif not fnmatch.fnmatch(path, pattern): - # Current part doesn't match. 
- return False - - return _check_matches(patterns[1:], paths[1:]) - - -def glob_matches_path(path, pattern, sep=os.sep, altsep=os.altsep): - if altsep: - pattern = pattern.replace(altsep, sep) - path = path.replace(altsep, sep) - - drive = '' - if len(path) > 1 and path[1] == ':': - drive, path = path[0], path[2:] - - if drive and len(pattern) > 1: - if pattern[1] == ':': - if drive.lower() != pattern[0].lower(): - return False - pattern = pattern[2:] - - patterns = pattern.split(sep) - paths = path.split(sep) - if paths: - if paths[0] == '': - paths = paths[1:] - if patterns: - if patterns[0] == '': - patterns = patterns[1:] - - return _check_matches(patterns, paths) - - -class FilesFiltering(object): - ''' - Note: calls at FilesFiltering are uncached. - - The actual API used should be through PyDB. - ''' - - def __init__(self): - self._exclude_filters = [] - self._project_roots = [] - self._library_roots = [] - - # Filter out libraries? - self._use_libraries_filter = False - self.require_module = False # True if some exclude filter filters by the module. - - self.set_use_libraries_filter(os.getenv('PYDEVD_FILTER_LIBRARIES') is not None) - - project_roots = os.getenv('IDE_PROJECT_ROOTS', None) - if project_roots is not None: - project_roots = project_roots.split(os.pathsep) - else: - project_roots = [] - self.set_project_roots(project_roots) - - library_roots = os.getenv('LIBRARY_ROOTS', None) - if library_roots is not None: - library_roots = library_roots.split(os.pathsep) - else: - library_roots = self._get_default_library_roots() - self.set_library_roots(library_roots) - - # Stepping filters. - pydevd_filters = os.getenv('PYDEVD_FILTERS', '') - # To filter out it's something as: {'**/not_my_code/**': True} - if pydevd_filters: - pydev_log.debug("PYDEVD_FILTERS %s", (pydevd_filters,)) - if pydevd_filters.startswith('{'): - # dict(glob_pattern (str) -> exclude(True or False)) - exclude_filters = [] - for key, val in json.loads(pydevd_filters).items(): - exclude_filters.append(ExcludeFilter(key, val, True)) - self._exclude_filters = exclude_filters - else: - # A ';' separated list of strings with globs for the - # list of excludes. - filters = pydevd_filters.split(';') - new_filters = [] - for new_filter in filters: - if new_filter.strip(): - new_filters.append(ExcludeFilter(new_filter.strip(), True, True)) - self._exclude_filters = new_filters - - @classmethod - def _get_default_library_roots(cls): - pydev_log.debug("Collecting default library roots.") - # Provide sensible defaults if not in env vars. - import site - - roots = [] - - try: - import sysconfig # Python 2.7 onwards only. - except ImportError: - pass - else: - for path_name in set(('stdlib', 'platstdlib', 'purelib', 'platlib')) & set(sysconfig.get_path_names()): - roots.append(sysconfig.get_path(path_name)) - - # Make sure we always get at least the standard library location (based on the `os` and - # `threading` modules -- it's a bit weird that it may be different on the ci, but it happens). - roots.append(os.path.dirname(os.__file__)) - roots.append(os.path.dirname(threading.__file__)) - if IS_PYPY: - # On PyPy 3.6 (7.3.1) it wrongly says that sysconfig.get_path('stdlib') is - # /lib-pypy when the installed version is /lib_pypy. 
- try: - import _pypy_wait - except ImportError: - pydev_log.debug("Unable to import _pypy_wait on PyPy when collecting default library roots.") - else: - pypy_lib_dir = os.path.dirname(_pypy_wait.__file__) - pydev_log.debug("Adding %s to default library roots.", pypy_lib_dir) - roots.append(pypy_lib_dir) - - if hasattr(site, 'getusersitepackages'): - site_paths = site.getusersitepackages() - if isinstance(site_paths, (list, tuple)): - for site_path in site_paths: - roots.append(site_path) - else: - roots.append(site_paths) - - if hasattr(site, 'getsitepackages'): - site_paths = site.getsitepackages() - if isinstance(site_paths, (list, tuple)): - for site_path in site_paths: - roots.append(site_path) - else: - roots.append(site_paths) - - for path in sys.path: - if os.path.exists(path) and os.path.basename(path) in ('site-packages', 'pip-global'): - roots.append(path) - - # On WASM some of the roots may not exist, filter those out. - roots = [path for path in roots if path is not None] - roots.extend([os.path.realpath(path) for path in roots]) - - return sorted(set(roots)) - - def _fix_roots(self, roots): - roots = _convert_to_str_and_clear_empty(roots) - new_roots = [] - for root in roots: - path = self._absolute_normalized_path(root) - if pydevd_constants.IS_WINDOWS: - new_roots.append(path + '\\') - else: - new_roots.append(path + '/') - return new_roots - - def _absolute_normalized_path(self, filename): - ''' - Provides a version of the filename that's absolute and normalized. - ''' - return normcase(pydevd_file_utils.absolute_path(filename)) - - def set_project_roots(self, project_roots): - self._project_roots = self._fix_roots(project_roots) - pydev_log.debug("IDE_PROJECT_ROOTS %s\n" % project_roots) - - def _get_project_roots(self): - return self._project_roots - - def set_library_roots(self, roots): - self._library_roots = self._fix_roots(roots) - pydev_log.debug("LIBRARY_ROOTS %s\n" % roots) - - def _get_library_roots(self): - return self._library_roots - - def in_project_roots(self, received_filename): - ''' - Note: don't call directly. Use PyDb.in_project_scope (there's no caching here and it doesn't - handle all possibilities for knowing whether a project is actually in the scope, it - just handles the heuristics based on the absolute_normalized_filename without the actual frame). - ''' - DEBUG = False - - if received_filename.startswith(USER_CODE_BASENAMES_STARTING_WITH): - if DEBUG: - pydev_log.debug('In in_project_roots - user basenames - starts with %s (%s)', received_filename, USER_CODE_BASENAMES_STARTING_WITH) - return True - - if received_filename.startswith(LIBRARY_CODE_BASENAMES_STARTING_WITH): - if DEBUG: - pydev_log.debug('Not in in_project_roots - library basenames - starts with %s (%s)', received_filename, LIBRARY_CODE_BASENAMES_STARTING_WITH) - return False - - project_roots = self._get_project_roots() # roots are absolute/normalized. 
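
The block that follows implements the core heuristic: a file counts as project code when its longest matching project root is longer than its longest matching library root. A simplified pure-Python sketch of that rule (illustrative paths; the real method also normalizes case and handles basename prefixes):

```python
def in_project(filename, project_roots, library_roots):
    # Roots are assumed to already end with the path separator (see _fix_roots).
    proj_hits = [r for r in project_roots if filename.startswith(r)]
    lib_hits = [r for r in library_roots if filename.startswith(r)]
    if not project_roots:
        # No configured project roots: everything outside the libraries is "project".
        return not lib_hits
    if proj_hits and lib_hits:
        # Found under both: the deeper (longer) matching root wins.
        return max(map(len, proj_hits)) > max(map(len, lib_hits))
    return bool(proj_hits)

print(in_project('/home/me/proj/app.py', ['/home/me/proj/'], ['/usr/lib/python3.11/']))      # True
print(in_project('/usr/lib/python3.11/os.py', ['/home/me/proj/'], ['/usr/lib/python3.11/']))  # False
```
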
- - absolute_normalized_filename = self._absolute_normalized_path(received_filename) - absolute_normalized_filename_as_dir = absolute_normalized_filename + ('\\' if IS_WINDOWS else '/') - - found_in_project = [] - for root in project_roots: - if root and (absolute_normalized_filename.startswith(root) or root == absolute_normalized_filename_as_dir): - if DEBUG: - pydev_log.debug('In project: %s (%s)', absolute_normalized_filename, root) - found_in_project.append(root) - - found_in_library = [] - library_roots = self._get_library_roots() - for root in library_roots: - if root and (absolute_normalized_filename.startswith(root) or root == absolute_normalized_filename_as_dir): - found_in_library.append(root) - if DEBUG: - pydev_log.debug('In library: %s (%s)', absolute_normalized_filename, root) - else: - if DEBUG: - pydev_log.debug('Not in library: %s (%s)', absolute_normalized_filename, root) - - if not project_roots: - # If we have no project roots configured, consider it being in the project - # roots if it's not found in site-packages (because we have defaults for those - # and not the other way around). - in_project = not found_in_library - if DEBUG: - pydev_log.debug('Final in project (no project roots): %s (%s)', absolute_normalized_filename, in_project) - - else: - in_project = False - if found_in_project: - if not found_in_library: - if DEBUG: - pydev_log.debug('Final in project (in_project and not found_in_library): %s (True)', absolute_normalized_filename) - in_project = True - else: - # Found in both, let's see which one has the bigger path matched. - if max(len(x) for x in found_in_project) > max(len(x) for x in found_in_library): - in_project = True - if DEBUG: - pydev_log.debug('Final in project (found in both): %s (%s)', absolute_normalized_filename, in_project) - - return in_project - - def use_libraries_filter(self): - ''' - Should we debug only what's inside project folders? - ''' - return self._use_libraries_filter - - def set_use_libraries_filter(self, use): - pydev_log.debug("pydevd: Use libraries filter: %s\n" % use) - self._use_libraries_filter = use - - def use_exclude_filters(self): - # Enabled if we have any filters registered. - return len(self._exclude_filters) > 0 - - def exclude_by_filter(self, absolute_filename, module_name): - ''' - :return: True if it should be excluded, False if it should be included and None - if no rule matched the given file. - ''' - for exclude_filter in self._exclude_filters: # : :type exclude_filter: ExcludeFilter - if exclude_filter.is_path: - if glob_matches_path(absolute_filename, exclude_filter.name): - return exclude_filter.exclude - else: - # Module filter. 
- if exclude_filter.name == module_name or module_name.startswith(exclude_filter.name + '.'): - return exclude_filter.exclude - return None - - def set_exclude_filters(self, exclude_filters): - ''' - :param list(ExcludeFilter) exclude_filters: - ''' - self._exclude_filters = exclude_filters - self.require_module = False - for exclude_filter in exclude_filters: - if not exclude_filter.is_path: - self.require_module = True - break diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/modeling/backbone/swin.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/modeling/backbone/swin.py deleted file mode 100644 index 2380cde59570e5d5b8fb2536d0961f8e27a07fd4..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/modeling/backbone/swin.py +++ /dev/null @@ -1,771 +0,0 @@ -# -------------------------------------------------------- -# Swin Transformer -# Copyright (c) 2021 Microsoft -# Licensed under The MIT License [see LICENSE for details] -# Written by Ze Liu, Yutong Lin, Yixuan Wei -# -------------------------------------------------------- - -# ------------------------------------------------------------------------------ -# Reference: https://github.com/facebookresearch/Mask2Former -# ------------------------------------------------------------------------------ - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from annotator.oneformer.detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec - - -class Mlp(nn.Module): - """Multilayer perceptron.""" - - def __init__( - self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0 - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - """Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. Default: 0.0 - """ - - def __init__( - self, - dim, - window_size, - num_heads, - qkv_bias=True, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - ): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) - ) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=0.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """Forward function. - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = ( - self.qkv(x) - .reshape(B_, N, 3, self.num_heads, C // self.num_heads) - .permute(2, 0, 3, 1, 4) - ) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = q @ k.transpose(-2, -1) - - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.view(-1) - ].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 - ) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute( - 2, 0, 1 - ).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SwinTransformerBlock(nn.Module): - """Swin Transformer Block. - Args: - dim (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__( - self, - dim, - num_heads, - window_size=7, - shift_size=0, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - act_layer=nn.GELU, - norm_layer=nn.LayerNorm, - ): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, - window_size=to_2tuple(self.window_size), - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop, - ) - - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop - ) - - self.H = None - self.W = None - - def forward(self, x, mask_matrix): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - mask_matrix: Attention mask for cyclic shift. - """ - B, L, C = x.shape - H, W = self.H, self.W - assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # pad feature maps to multiples of window size - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - attn_mask = mask_matrix - else: - shifted_x = x - attn_mask = None - - # partition windows - x_windows = window_partition( - shifted_x, self.window_size - ) # nW*B, window_size, window_size, C - x_windows = x_windows.view( - -1, self.window_size * self.window_size, C - ) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - -class PatchMerging(nn.Module): - """Patch Merging Layer - Args: - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm - """ - - def __init__(self, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x, H, W): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - """ - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - - x = x.view(B, H, W, C) - - # padding - pad_input = (H % 2 == 1) or (W % 2 == 1) - if pad_input: - x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - -class BasicLayer(nn.Module): - """A basic Swin Transformer layer for one stage. - Args: - dim (int): Number of feature channels - depth (int): Depths of this stage. - num_heads (int): Number of attention head. - window_size (int): Local window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__( - self, - dim, - depth, - num_heads, - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False, - ): - super().__init__() - self.window_size = window_size - self.shift_size = window_size // 2 - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList( - [ - SwinTransformerBlock( - dim=dim, - num_heads=num_heads, - window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer, - ) - for i in range(depth) - ] - ) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, H, W): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
- """ - - # calculate attention mask for SW-MSA - Hp = int(np.ceil(H / self.window_size)) * self.window_size - Wp = int(np.ceil(W / self.window_size)) * self.window_size - img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 - h_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - w_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition( - img_mask, self.window_size - ) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( - attn_mask == 0, float(0.0) - ) - - for blk in self.blocks: - blk.H, blk.W = H, W - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, attn_mask) - else: - x = blk(x, attn_mask) - if self.downsample is not None: - x_down = self.downsample(x, H, W) - Wh, Ww = (H + 1) // 2, (W + 1) // 2 - return x, H, W, x_down, Wh, Ww - else: - return x, H, W, x, H, W - - -class PatchEmbed(nn.Module): - """Image to Patch Embedding - Args: - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - patch_size = to_2tuple(patch_size) - self.patch_size = patch_size - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - """Forward function.""" - # padding - _, _, H, W = x.size() - if W % self.patch_size[1] != 0: - x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) - if H % self.patch_size[0] != 0: - x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) - - x = self.proj(x) # B C Wh Ww - if self.norm is not None: - Wh, Ww = x.size(2), x.size(3) - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) - - return x - - -class SwinTransformer(nn.Module): - """Swin Transformer backbone. - A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - - https://arxiv.org/pdf/2103.14030 - Args: - pretrain_img_size (int): Input image size for training the pretrained model, - used in absolute postion embedding. Default 224. - patch_size (int | tuple(int)): Patch size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - depths (tuple[int]): Depths of each Swin Transformer stage. - num_heads (tuple[int]): Number of attention head of each stage. - window_size (int): Window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. - drop_rate (float): Dropout rate. 
- attn_drop_rate (float): Attention dropout rate. Default: 0. - drop_path_rate (float): Stochastic depth rate. Default: 0.2. - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. - patch_norm (bool): If True, add normalization after patch embedding. Default: True. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__( - self, - pretrain_img_size=224, - patch_size=4, - in_chans=3, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop_rate=0.0, - attn_drop_rate=0.0, - drop_path_rate=0.2, - norm_layer=nn.LayerNorm, - ape=False, - patch_norm=True, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - use_checkpoint=False, - ): - super().__init__() - - self.pretrain_img_size = pretrain_img_size - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.out_indices = out_indices - self.frozen_stages = frozen_stages - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - patch_size=patch_size, - in_chans=in_chans, - embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None, - ) - - # absolute position embedding - if self.ape: - pretrain_img_size = to_2tuple(pretrain_img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [ - pretrain_img_size[0] // patch_size[0], - pretrain_img_size[1] // patch_size[1], - ] - - self.absolute_pos_embed = nn.Parameter( - torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]) - ) - trunc_normal_(self.absolute_pos_embed, std=0.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [ - x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) - ] # stochastic depth decay rule - - # build layers - self.layers = nn.ModuleList() - for i_layer in range(self.num_layers): - layer = BasicLayer( - dim=int(embed_dim * 2 ** i_layer), - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], - norm_layer=norm_layer, - downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, - use_checkpoint=use_checkpoint, - ) - self.layers.append(layer) - - num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] - self.num_features = num_features - - # add a norm layer for each output - for i_layer in out_indices: - layer = norm_layer(num_features[i_layer]) - layer_name = f"norm{i_layer}" - self.add_module(layer_name, layer) - - self._freeze_stages() - - def _freeze_stages(self): - if self.frozen_stages >= 0: - self.patch_embed.eval() - for param in self.patch_embed.parameters(): - param.requires_grad = False - - if self.frozen_stages >= 1 and self.ape: - self.absolute_pos_embed.requires_grad = False - - if self.frozen_stages >= 2: - self.pos_drop.eval() - for i in range(0, self.frozen_stages - 1): - m = self.layers[i] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. 
- Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - - def _init_weights(m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - def forward(self, x): - """Forward function.""" - x = self.patch_embed(x) - - Wh, Ww = x.size(2), x.size(3) - if self.ape: - # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" - ) - x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C - else: - x = x.flatten(2).transpose(1, 2) - x = self.pos_drop(x) - - outs = {} - for i in range(self.num_layers): - layer = self.layers[i] - x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) - - if i in self.out_indices: - norm_layer = getattr(self, f"norm{i}") - x_out = norm_layer(x_out) - - out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() - outs["res{}".format(i + 2)] = out - - return outs - - def train(self, mode=True): - """Convert the model into training mode while keep layers freezed.""" - super(SwinTransformer, self).train(mode) - self._freeze_stages() - - -@BACKBONE_REGISTRY.register() -class D2SwinTransformer(SwinTransformer, Backbone): - def __init__(self, cfg, input_shape): - - pretrain_img_size = cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE - patch_size = cfg.MODEL.SWIN.PATCH_SIZE - in_chans = 3 - embed_dim = cfg.MODEL.SWIN.EMBED_DIM - depths = cfg.MODEL.SWIN.DEPTHS - num_heads = cfg.MODEL.SWIN.NUM_HEADS - window_size = cfg.MODEL.SWIN.WINDOW_SIZE - mlp_ratio = cfg.MODEL.SWIN.MLP_RATIO - qkv_bias = cfg.MODEL.SWIN.QKV_BIAS - qk_scale = cfg.MODEL.SWIN.QK_SCALE - drop_rate = cfg.MODEL.SWIN.DROP_RATE - attn_drop_rate = cfg.MODEL.SWIN.ATTN_DROP_RATE - drop_path_rate = cfg.MODEL.SWIN.DROP_PATH_RATE - norm_layer = nn.LayerNorm - ape = cfg.MODEL.SWIN.APE - patch_norm = cfg.MODEL.SWIN.PATCH_NORM - use_checkpoint = cfg.MODEL.SWIN.USE_CHECKPOINT - - super().__init__( - pretrain_img_size, - patch_size, - in_chans, - embed_dim, - depths, - num_heads, - window_size, - mlp_ratio, - qkv_bias, - qk_scale, - drop_rate, - attn_drop_rate, - drop_path_rate, - norm_layer, - ape, - patch_norm, - use_checkpoint=use_checkpoint, - ) - - self._out_features = cfg.MODEL.SWIN.OUT_FEATURES - - self._out_feature_strides = { - "res2": 4, - "res3": 8, - "res4": 16, - "res5": 32, - } - self._out_feature_channels = { - "res2": self.num_features[0], - "res3": self.num_features[1], - "res4": self.num_features[2], - "res5": self.num_features[3], - } - - def forward(self, x): - """ - Args: - x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. - Returns: - dict[str->Tensor]: names and the corresponding features - """ - assert ( - x.dim() == 4 - ), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!" 
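
An aside on the two window helpers defined near the top of this file: `window_partition` and `window_reverse` are exact inverses whenever H and W are divisible by the window size. A self-contained round-trip check (the helpers are duplicated here so the snippet runs on its own):

```python
import torch

def window_partition(x, window_size):
    # (B, H, W, C) -> (num_windows * B, window_size, window_size, C)
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)

def window_reverse(windows, window_size, H, W):
    # Inverse of window_partition: reassemble windows back into (B, H, W, C)
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)

x = torch.randn(2, 14, 14, 8)   # B, H, W, C with H and W divisible by the window size
w = window_partition(x, 7)      # -> (2 * 2 * 2, 7, 7, 8) windows
assert torch.equal(window_reverse(w, 7, 14, 14), x)
print(w.shape)                  # torch.Size([8, 7, 7, 8])
```
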
- outputs = {} - y = super().forward(x) - for k in y.keys(): - if k in self._out_features: - outputs[k] = y[k] - return outputs - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] - ) - for name in self._out_features - } - - @property - def size_divisibility(self): - return 32 diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/necks/__init__.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/necks/__init__.py deleted file mode 100644 index 9b9d3d5b3fe80247642d962edd6fb787537d01d6..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/necks/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .fpn import FPN -from .multilevel_neck import MultiLevelNeck - -__all__ = ['FPN', 'MultiLevelNeck'] diff --git a/spaces/TabPFN/TabPFNPrediction/TabPFN/scripts/baseline_prediction_interface.py b/spaces/TabPFN/TabPFNPrediction/TabPFN/scripts/baseline_prediction_interface.py deleted file mode 100644 index 298a046c4c3c39cbddbcdc5ee47c68606c706b2c..0000000000000000000000000000000000000000 --- a/spaces/TabPFN/TabPFNPrediction/TabPFN/scripts/baseline_prediction_interface.py +++ /dev/null @@ -1,38 +0,0 @@ -import tqdm -import numpy as np - -def baseline_predict(metric_function, eval_xs, eval_ys, categorical_feats, metric_used=None, eval_pos=2, max_time=300, **kwargs): - """ - Baseline prediction interface. - :param metric_function: - :param eval_xs: - :param eval_ys: - :param categorical_feats: - :param metric_used: - :param eval_pos: - :param max_time: Scheduled maximum time - :param kwargs: - :return: list [np.array(metrics), np.array(outputs), best_configs] or [None, None, None] if failed - """ - - metrics = [] - outputs = [] - best_configs = [] - eval_splits = list(zip(eval_xs.transpose(0, 1), eval_ys.transpose(0, 1))) - for eval_x, eval_y in tqdm.tqdm(eval_splits, desc='Calculating splits'+str(metric_function)+' '+str(eval_pos)): - try: - metric, output, best_config = metric_function(eval_x[:eval_pos], - eval_y[:eval_pos], - eval_x[eval_pos:], - eval_y[eval_pos:], - categorical_feats, - metric_used=metric_used - , max_time=max_time) - metrics += [metric] - outputs += [output] - best_configs += [best_config] - return np.array(metrics), np.array(outputs), best_configs - except Exception as e: - print(f'There was an exception in {metric_function}') - print(e) - return None, None, None \ No newline at end of file diff --git a/spaces/Tabaxi3K/FrankenFlic/style.css b/spaces/Tabaxi3K/FrankenFlic/style.css deleted file mode 100644 index b5d0196f5bd49b4824117ab7ebb6f6ed76053000..0000000000000000000000000000000000000000 --- a/spaces/Tabaxi3K/FrankenFlic/style.css +++ /dev/null @@ -1,13 +0,0 @@ -body { - color: #fff; - background-color: #000; -} - -.stButton>button { - color: #000; -} - -h1 { - font-size: 70px; - font-family: Georgia, 'Times New Roman', Times, serif; -} \ No newline at end of file diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/utils.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/utils.py deleted file mode 100644 index 36607eda2ec5b8bdc4fe87e256cc8f3b1a79f707..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/utils.py +++ /dev/null @@ -1,1094 +0,0 @@ -""" -requests.utils -~~~~~~~~~~~~~~ - -This module provides utility 
functions that are used within Requests -that are also useful for external consumption. -""" - -import codecs -import contextlib -import io -import os -import re -import socket -import struct -import sys -import tempfile -import warnings -import zipfile -from collections import OrderedDict - -from pip._vendor.urllib3.util import make_headers, parse_url - -from . import certs -from .__version__ import __version__ - -# to_native_string is unused here, but imported here for backwards compatibility -from ._internal_utils import ( # noqa: F401 - _HEADER_VALIDATORS_BYTE, - _HEADER_VALIDATORS_STR, - HEADER_VALIDATORS, - to_native_string, -) -from .compat import ( - Mapping, - basestring, - bytes, - getproxies, - getproxies_environment, - integer_types, -) -from .compat import parse_http_list as _parse_list_header -from .compat import ( - proxy_bypass, - proxy_bypass_environment, - quote, - str, - unquote, - urlparse, - urlunparse, -) -from .cookies import cookiejar_from_dict -from .exceptions import ( - FileModeWarning, - InvalidHeader, - InvalidURL, - UnrewindableBodyError, -) -from .structures import CaseInsensitiveDict - -NETRC_FILES = (".netrc", "_netrc") - -DEFAULT_CA_BUNDLE_PATH = certs.where() - -DEFAULT_PORTS = {"http": 80, "https": 443} - -# Ensure that ', ' is used to preserve previous delimiter behavior. -DEFAULT_ACCEPT_ENCODING = ", ".join( - re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"]) -) - - -if sys.platform == "win32": - # provide a proxy_bypass version on Windows without DNS lookups - - def proxy_bypass_registry(host): - try: - import winreg - except ImportError: - return False - - try: - internetSettings = winreg.OpenKey( - winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Internet Settings", - ) - # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it - proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0]) - # ProxyOverride is almost always a string - proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0] - except (OSError, ValueError): - return False - if not proxyEnable or not proxyOverride: - return False - - # make a check value list from the registry entry: replace the - # '' string by the localhost entry and the corresponding - # canonical entry. - proxyOverride = proxyOverride.split(";") - # now check if we match one of the registry values. - for test in proxyOverride: - if test == "": - if "." not in host: - return True - test = test.replace(".", r"\.") # mask dots - test = test.replace("*", r".*") # change glob sequence - test = test.replace("?", r".") # change glob char - if re.match(test, host, re.I): - return True - return False - - def proxy_bypass(host): # noqa - """Return True, if the host should be bypassed. - - Checks proxy settings gathered from the environment, if specified, - or the registry. - """ - if getproxies_environment(): - return proxy_bypass_environment(host) - else: - return proxy_bypass_registry(host) - - -def dict_to_sequence(d): - """Returns an internal sequence dictionary update.""" - - if hasattr(d, "items"): - d = d.items() - - return d - - -def super_len(o): - total_length = None - current_position = 0 - - if hasattr(o, "__len__"): - total_length = len(o) - - elif hasattr(o, "len"): - total_length = o.len - - elif hasattr(o, "fileno"): - try: - fileno = o.fileno() - except (io.UnsupportedOperation, AttributeError): - # AttributeError is a surprising exception, seeing as how we've just checked - # that `hasattr(o, 'fileno')`. 
It happens for objects obtained via - # `Tarfile.extractfile()`, per issue 5229. - pass - else: - total_length = os.fstat(fileno).st_size - - # Having used fstat to determine the file length, we need to - # confirm that this file was opened up in binary mode. - if "b" not in o.mode: - warnings.warn( - ( - "Requests has determined the content-length for this " - "request using the binary size of the file: however, the " - "file has been opened in text mode (i.e. without the 'b' " - "flag in the mode). This may lead to an incorrect " - "content-length. In Requests 3.0, support will be removed " - "for files in text mode." - ), - FileModeWarning, - ) - - if hasattr(o, "tell"): - try: - current_position = o.tell() - except OSError: - # This can happen in some weird situations, such as when the file - # is actually a special file descriptor like stdin. In this - # instance, we don't know what the length is, so set it to zero and - # let requests chunk it instead. - if total_length is not None: - current_position = total_length - else: - if hasattr(o, "seek") and total_length is None: - # StringIO and BytesIO have seek but no usable fileno - try: - # seek to end of file - o.seek(0, 2) - total_length = o.tell() - - # seek back to current position to support - # partially read file-like objects - o.seek(current_position or 0) - except OSError: - total_length = 0 - - if total_length is None: - total_length = 0 - - return max(0, total_length - current_position) - - -def get_netrc_auth(url, raise_errors=False): - """Returns the Requests tuple auth for a given url from netrc.""" - - netrc_file = os.environ.get("NETRC") - if netrc_file is not None: - netrc_locations = (netrc_file,) - else: - netrc_locations = (f"~/{f}" for f in NETRC_FILES) - - try: - from netrc import NetrcParseError, netrc - - netrc_path = None - - for f in netrc_locations: - try: - loc = os.path.expanduser(f) - except KeyError: - # os.path.expanduser can fail when $HOME is undefined and - # getpwuid fails. See https://bugs.python.org/issue20164 & - # https://github.com/psf/requests/issues/1846 - return - - if os.path.exists(loc): - netrc_path = loc - break - - # Abort early if there isn't one. - if netrc_path is None: - return - - ri = urlparse(url) - - # Strip port numbers from netloc. This weird `if...encode`` dance is - # used for Python 3.2, which doesn't support unicode literals. - splitstr = b":" - if isinstance(url, str): - splitstr = splitstr.decode("ascii") - host = ri.netloc.split(splitstr)[0] - - try: - _netrc = netrc(netrc_path).authenticators(host) - if _netrc: - # Return with login / password - login_i = 0 if _netrc[0] else 1 - return (_netrc[login_i], _netrc[2]) - except (NetrcParseError, OSError): - # If there was a parsing error or a permissions issue reading the file, - # we'll just skip netrc auth unless explicitly asked to raise errors. - if raise_errors: - raise - - # App Engine hackiness. - except (ImportError, AttributeError): - pass - - -def guess_filename(obj): - """Tries to guess the filename of the given object.""" - name = getattr(obj, "name", None) - if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">": - return os.path.basename(name) - - -def extract_zipped_paths(path): - """Replace nonexistent paths that look like they refer to a member of a zip - archive with the location of an extracted copy of the target, or else - just return the provided path unchanged. 
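    A behaviour sketch (the paths here are illustrative, not real files;
    assume /opt/bundle.zip is a valid archive containing certs/ca.pem):

    >>> extract_zipped_paths('/etc/hosts')                    # existing path, unchanged
    '/etc/hosts'
    >>> extract_zipped_paths('/opt/bundle.zip/certs/ca.pem')  # zip member, extracted copy
    '/tmp/ca.pem'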
- """ - if os.path.exists(path): - # this is already a valid path, no need to do anything further - return path - - # find the first valid part of the provided path and treat that as a zip archive - # assume the rest of the path is the name of a member in the archive - archive, member = os.path.split(path) - while archive and not os.path.exists(archive): - archive, prefix = os.path.split(archive) - if not prefix: - # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split), - # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users - break - member = "/".join([prefix, member]) - - if not zipfile.is_zipfile(archive): - return path - - zip_file = zipfile.ZipFile(archive) - if member not in zip_file.namelist(): - return path - - # we have a valid zip archive and a valid member of that archive - tmp = tempfile.gettempdir() - extracted_path = os.path.join(tmp, member.split("/")[-1]) - if not os.path.exists(extracted_path): - # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition - with atomic_open(extracted_path) as file_handler: - file_handler.write(zip_file.read(member)) - return extracted_path - - -@contextlib.contextmanager -def atomic_open(filename): - """Write a file to the disk in an atomic fashion""" - tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename)) - try: - with os.fdopen(tmp_descriptor, "wb") as tmp_handler: - yield tmp_handler - os.replace(tmp_name, filename) - except BaseException: - os.remove(tmp_name) - raise - - -def from_key_val_list(value): - """Take an object and test to see if it can be represented as a - dictionary. Unless it can not be represented as such, return an - OrderedDict, e.g., - - :: - - >>> from_key_val_list([('key', 'val')]) - OrderedDict([('key', 'val')]) - >>> from_key_val_list('string') - Traceback (most recent call last): - ... - ValueError: cannot encode objects that are not 2-tuples - >>> from_key_val_list({'key': 'val'}) - OrderedDict([('key', 'val')]) - - :rtype: OrderedDict - """ - if value is None: - return None - - if isinstance(value, (str, bytes, bool, int)): - raise ValueError("cannot encode objects that are not 2-tuples") - - return OrderedDict(value) - - -def to_key_val_list(value): - """Take an object and test to see if it can be represented as a - dictionary. If it can be, return a list of tuples, e.g., - - :: - - >>> to_key_val_list([('key', 'val')]) - [('key', 'val')] - >>> to_key_val_list({'key': 'val'}) - [('key', 'val')] - >>> to_key_val_list('string') - Traceback (most recent call last): - ... - ValueError: cannot encode objects that are not 2-tuples - - :rtype: list - """ - if value is None: - return None - - if isinstance(value, (str, bytes, bool, int)): - raise ValueError("cannot encode objects that are not 2-tuples") - - if isinstance(value, Mapping): - value = value.items() - - return list(value) - - -# From mitsuhiko/werkzeug (used with permission). -def parse_list_header(value): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Quotes are removed automatically after parsing. - - It basically works like :func:`parse_set_header` just that items - may appear multiple times and case sensitivity is preserved. 
- - The return value is a standard :class:`list`: - - >>> parse_list_header('token, "quoted value"') - ['token', 'quoted value'] - - To create a header from the :class:`list` again, use the - :func:`dump_header` function. - - :param value: a string with a list header. - :return: :class:`list` - :rtype: list - """ - result = [] - for item in _parse_list_header(value): - if item[:1] == item[-1:] == '"': - item = unquote_header_value(item[1:-1]) - result.append(item) - return result - - -# From mitsuhiko/werkzeug (used with permission). -def parse_dict_header(value): - """Parse lists of key, value pairs as described by RFC 2068 Section 2 and - convert them into a python dict: - - >>> d = parse_dict_header('foo="is a fish", bar="as well"') - >>> type(d) is dict - True - >>> sorted(d.items()) - [('bar', 'as well'), ('foo', 'is a fish')] - - If there is no value for a key it will be `None`: - - >>> parse_dict_header('key_without_value') - {'key_without_value': None} - - To create a header from the :class:`dict` again, use the - :func:`dump_header` function. - - :param value: a string with a dict header. - :return: :class:`dict` - :rtype: dict - """ - result = {} - for item in _parse_list_header(value): - if "=" not in item: - result[item] = None - continue - name, value = item.split("=", 1) - if value[:1] == value[-1:] == '"': - value = unquote_header_value(value[1:-1]) - result[name] = value - return result - - -# From mitsuhiko/werkzeug (used with permission). -def unquote_header_value(value, is_filename=False): - r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). - This does not use the real unquoting but what browsers are actually - using for quoting. - - :param value: the header value to unquote. - :rtype: str - """ - if value and value[0] == value[-1] == '"': - # this is not the real unquoting, but fixing this so that the - # RFC is met will result in bugs with internet explorer and - # probably some other browsers as well. IE for example is - # uploading files with "C:\foo\bar.txt" as filename - value = value[1:-1] - - # if this is a filename and the starting characters look like - # a UNC path, then just return the value without quotes. Using the - # replace sequence below on a UNC path has the effect of turning - # the leading double slash into a single slash and then - # _fix_ie_filename() doesn't work correctly. See #458. - if not is_filename or value[:2] != "\\\\": - return value.replace("\\\\", "\\").replace('\\"', '"') - return value - - -def dict_from_cookiejar(cj): - """Returns a key/value dictionary from a CookieJar. - - :param cj: CookieJar object to extract cookies from. - :rtype: dict - """ - - cookie_dict = {} - - for cookie in cj: - cookie_dict[cookie.name] = cookie.value - - return cookie_dict - - -def add_dict_to_cookiejar(cj, cookie_dict): - """Returns a CookieJar from a key/value dictionary. - - :param cj: CookieJar to insert cookies into. - :param cookie_dict: Dict of key/values to insert into CookieJar. - :rtype: CookieJar - """ - - return cookiejar_from_dict(cookie_dict, cj) - - -def get_encodings_from_content(content): - """Returns encodings from given content string. - - :param content: bytestring to extract encodings from. - """ - warnings.warn( - ( - "In requests 3.0, get_encodings_from_content will be removed. For " - "more information, please see the discussion on issue #2266. 
(This" - " warning should only appear once.)" - ), - DeprecationWarning, - ) - - charset_re = re.compile(r']', flags=re.I) - pragma_re = re.compile(r']', flags=re.I) - xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') - - return ( - charset_re.findall(content) - + pragma_re.findall(content) - + xml_re.findall(content) - ) - - -def _parse_content_type_header(header): - """Returns content type and parameters from given header - - :param header: string - :return: tuple containing content type and dictionary of - parameters - """ - - tokens = header.split(";") - content_type, params = tokens[0].strip(), tokens[1:] - params_dict = {} - items_to_strip = "\"' " - - for param in params: - param = param.strip() - if param: - key, value = param, True - index_of_equals = param.find("=") - if index_of_equals != -1: - key = param[:index_of_equals].strip(items_to_strip) - value = param[index_of_equals + 1 :].strip(items_to_strip) - params_dict[key.lower()] = value - return content_type, params_dict - - -def get_encoding_from_headers(headers): - """Returns encodings from given HTTP Header Dict. - - :param headers: dictionary to extract encoding from. - :rtype: str - """ - - content_type = headers.get("content-type") - - if not content_type: - return None - - content_type, params = _parse_content_type_header(content_type) - - if "charset" in params: - return params["charset"].strip("'\"") - - if "text" in content_type: - return "ISO-8859-1" - - if "application/json" in content_type: - # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset - return "utf-8" - - -def stream_decode_response_unicode(iterator, r): - """Stream decodes an iterator.""" - - if r.encoding is None: - yield from iterator - return - - decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace") - for chunk in iterator: - rv = decoder.decode(chunk) - if rv: - yield rv - rv = decoder.decode(b"", final=True) - if rv: - yield rv - - -def iter_slices(string, slice_length): - """Iterate over slices of a string.""" - pos = 0 - if slice_length is None or slice_length <= 0: - slice_length = len(string) - while pos < len(string): - yield string[pos : pos + slice_length] - pos += slice_length - - -def get_unicode_from_response(r): - """Returns the requested content back in unicode. - - :param r: Response object to get unicode content from. - - Tried: - - 1. charset from content-type - 2. fall back and replace all unicode characters - - :rtype: str - """ - warnings.warn( - ( - "In requests 3.0, get_unicode_from_response will be removed. For " - "more information, please see the discussion on issue #2266. (This" - " warning should only appear once.)" - ), - DeprecationWarning, - ) - - tried_encodings = [] - - # Try charset from content-type - encoding = get_encoding_from_headers(r.headers) - - if encoding: - try: - return str(r.content, encoding) - except UnicodeError: - tried_encodings.append(encoding) - - # Fall back: - try: - return str(r.content, encoding, errors="replace") - except TypeError: - return r.content - - -# The unreserved URI characters (RFC 3986) -UNRESERVED_SET = frozenset( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~" -) - - -def unquote_unreserved(uri): - """Un-escape any percent-escape sequences in a URI that are unreserved - characters. This leaves all reserved, illegal and non-ASCII bytes encoded. 
- - :rtype: str - """ - parts = uri.split("%") - for i in range(1, len(parts)): - h = parts[i][0:2] - if len(h) == 2 and h.isalnum(): - try: - c = chr(int(h, 16)) - except ValueError: - raise InvalidURL(f"Invalid percent-escape sequence: '{h}'") - - if c in UNRESERVED_SET: - parts[i] = c + parts[i][2:] - else: - parts[i] = f"%{parts[i]}" - else: - parts[i] = f"%{parts[i]}" - return "".join(parts) - - -def requote_uri(uri): - """Re-quote the given URI. - - This function passes the given URI through an unquote/quote cycle to - ensure that it is fully and consistently quoted. - - :rtype: str - """ - safe_with_percent = "!#$%&'()*+,/:;=?@[]~" - safe_without_percent = "!#$&'()*+,/:;=?@[]~" - try: - # Unquote only the unreserved characters - # Then quote only illegal characters (do not quote reserved, - # unreserved, or '%') - return quote(unquote_unreserved(uri), safe=safe_with_percent) - except InvalidURL: - # We couldn't unquote the given URI, so let's try quoting it, but - # there may be unquoted '%'s in the URI. We need to make sure they're - # properly quoted so they do not cause issues elsewhere. - return quote(uri, safe=safe_without_percent) - - -def address_in_network(ip, net): - """This function allows you to check if an IP belongs to a network subnet - - Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 - returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 - - :rtype: bool - """ - ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0] - netaddr, bits = net.split("/") - netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0] - network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask - return (ipaddr & netmask) == (network & netmask) - - -def dotted_netmask(mask): - """Converts mask from /xx format to xxx.xxx.xxx.xxx - - Example: if mask is 24 function returns 255.255.255.0 - - :rtype: str - """ - bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1 - return socket.inet_ntoa(struct.pack(">I", bits)) - - -def is_ipv4_address(string_ip): - """ - :rtype: bool - """ - try: - socket.inet_aton(string_ip) - except OSError: - return False - return True - - -def is_valid_cidr(string_network): - """ - Very simple check of the cidr format in no_proxy variable. - - :rtype: bool - """ - if string_network.count("/") == 1: - try: - mask = int(string_network.split("/")[1]) - except ValueError: - return False - - if mask < 1 or mask > 32: - return False - - try: - socket.inet_aton(string_network.split("/")[0]) - except OSError: - return False - else: - return False - return True - - -@contextlib.contextmanager -def set_environ(env_name, value): - """Set the environment variable 'env_name' to 'value' - - Save previous value, yield, and then restore the previous value stored in - the environment variable 'env_name'. - - If 'value' is None, do nothing""" - value_changed = value is not None - if value_changed: - old_value = os.environ.get(env_name) - os.environ[env_name] = value - try: - yield - finally: - if value_changed: - if old_value is None: - del os.environ[env_name] - else: - os.environ[env_name] = old_value - - -def should_bypass_proxies(url, no_proxy): - """ - Returns whether we should bypass proxies or not. - - :rtype: bool - """ - # Prioritize lowercase environment variables over uppercase - # to keep a consistent behaviour with other http projects (curl, wget). - def get_proxy(key): - return os.environ.get(key) or os.environ.get(key.upper()) - - # First check whether no_proxy is defined. 
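# Spot checks for the CIDR helpers defined above, which back the IP branch
# below (stdlib only, no network I/O):
#
#   >>> dotted_netmask(24)
#   '255.255.255.0'
#   >>> address_in_network('192.168.1.1', '192.168.1.0/24')
#   True
#   >>> address_in_network('192.168.1.1', '192.168.100.0/24')
#   False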
If it is, check that the URL - # we're getting isn't in the no_proxy list. - no_proxy_arg = no_proxy - if no_proxy is None: - no_proxy = get_proxy("no_proxy") - parsed = urlparse(url) - - if parsed.hostname is None: - # URLs don't always have hostnames, e.g. file:/// urls. - return True - - if no_proxy: - # We need to check whether we match here. We need to see if we match - # the end of the hostname, both with and without the port. - no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host) - - if is_ipv4_address(parsed.hostname): - for proxy_ip in no_proxy: - if is_valid_cidr(proxy_ip): - if address_in_network(parsed.hostname, proxy_ip): - return True - elif parsed.hostname == proxy_ip: - # If no_proxy ip was defined in plain IP notation instead of cidr notation & - # matches the IP of the index - return True - else: - host_with_port = parsed.hostname - if parsed.port: - host_with_port += f":{parsed.port}" - - for host in no_proxy: - if parsed.hostname.endswith(host) or host_with_port.endswith(host): - # The URL does match something in no_proxy, so we don't want - # to apply the proxies on this URL. - return True - - with set_environ("no_proxy", no_proxy_arg): - # parsed.hostname can be `None` in cases such as a file URI. - try: - bypass = proxy_bypass(parsed.hostname) - except (TypeError, socket.gaierror): - bypass = False - - if bypass: - return True - - return False - - -def get_environ_proxies(url, no_proxy=None): - """ - Return a dict of environment proxies. - - :rtype: dict - """ - if should_bypass_proxies(url, no_proxy=no_proxy): - return {} - else: - return getproxies() - - -def select_proxy(url, proxies): - """Select a proxy for the url, if applicable. - - :param url: The url being for the request - :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs - """ - proxies = proxies or {} - urlparts = urlparse(url) - if urlparts.hostname is None: - return proxies.get(urlparts.scheme, proxies.get("all")) - - proxy_keys = [ - urlparts.scheme + "://" + urlparts.hostname, - urlparts.scheme, - "all://" + urlparts.hostname, - "all", - ] - proxy = None - for proxy_key in proxy_keys: - if proxy_key in proxies: - proxy = proxies[proxy_key] - break - - return proxy - - -def resolve_proxies(request, proxies, trust_env=True): - """This method takes proxy information from a request and configuration - input to resolve a mapping of target proxies. This will consider settings - such a NO_PROXY to strip proxy configurations. - - :param request: Request or PreparedRequest - :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs - :param trust_env: Boolean declaring whether to trust environment configs - - :rtype: dict - """ - proxies = proxies if proxies is not None else {} - url = request.url - scheme = urlparse(url).scheme - no_proxy = proxies.get("no_proxy") - new_proxies = proxies.copy() - - if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy): - environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) - - proxy = environ_proxies.get(scheme, environ_proxies.get("all")) - - if proxy: - new_proxies.setdefault(scheme, proxy) - return new_proxies - - -def default_user_agent(name="python-requests"): - """ - Return a string representing the default user agent. 
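    For example (the exact version depends on the bundled requests release,
    so the output below is illustrative):

    >>> default_user_agent()
    'python-requests/2.31.0'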
- - :rtype: str - """ - return f"{name}/{__version__}" - - -def default_headers(): - """ - :rtype: requests.structures.CaseInsensitiveDict - """ - return CaseInsensitiveDict( - { - "User-Agent": default_user_agent(), - "Accept-Encoding": DEFAULT_ACCEPT_ENCODING, - "Accept": "*/*", - "Connection": "keep-alive", - } - ) - - -def parse_header_links(value): - """Return a list of parsed link headers. - - i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg" - - :rtype: list - """ - - links = [] - - replace_chars = " '\"" - - value = value.strip(replace_chars) - if not value: - return links - - for val in re.split(", *<", value): - try: - url, params = val.split(";", 1) - except ValueError: - url, params = val, "" - - link = {"url": url.strip("<> '\"")} - - for param in params.split(";"): - try: - key, value = param.split("=") - except ValueError: - break - - link[key.strip(replace_chars)] = value.strip(replace_chars) - - links.append(link) - - return links - - -# Null bytes; no need to recreate these on each call to guess_json_utf -_null = "\x00".encode("ascii") # encoding to ASCII for Python 3 -_null2 = _null * 2 -_null3 = _null * 3 - - -def guess_json_utf(data): - """ - :rtype: str - """ - # JSON always starts with two ASCII characters, so detection is as - # easy as counting the nulls and from their location and count - # determine the encoding. Also detect a BOM, if present. - sample = data[:4] - if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): - return "utf-32" # BOM included - if sample[:3] == codecs.BOM_UTF8: - return "utf-8-sig" # BOM included, MS style (discouraged) - if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): - return "utf-16" # BOM included - nullcount = sample.count(_null) - if nullcount == 0: - return "utf-8" - if nullcount == 2: - if sample[::2] == _null2: # 1st and 3rd are null - return "utf-16-be" - if sample[1::2] == _null2: # 2nd and 4th are null - return "utf-16-le" - # Did not detect 2 valid UTF-16 ascii-range characters - if nullcount == 3: - if sample[:3] == _null3: - return "utf-32-be" - if sample[1:] == _null3: - return "utf-32-le" - # Did not detect a valid UTF-32 ascii-range character - return None - - -def prepend_scheme_if_needed(url, new_scheme): - """Given a URL that may or may not have a scheme, prepend the given scheme. - Does not replace a present scheme with the one provided as an argument. - - :rtype: str - """ - parsed = parse_url(url) - scheme, auth, host, port, path, query, fragment = parsed - - # A defect in urlparse determines that there isn't a netloc present in some - # urls. We previously assumed parsing was overly cautious, and swapped the - # netloc and path. Due to a lack of tests on the original defect, this is - # maintained with parse_url for backwards compatibility. - netloc = parsed.netloc - if not netloc: - netloc, path = path, netloc - - if auth: - # parse_url doesn't provide the netloc with auth - # so we'll add it ourselves. - netloc = "@".join([auth, netloc]) - if scheme is None: - scheme = new_scheme - if path is None: - path = "" - - return urlunparse((scheme, netloc, path, "", query, fragment)) - - -def get_auth_from_url(url): - """Given a url with authentication components, extract them into a tuple of - username,password. 
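    For example:

    >>> get_auth_from_url('https://user:pass@example.com/path')
    ('user', 'pass')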
- - :rtype: (str,str) - """ - parsed = urlparse(url) - - try: - auth = (unquote(parsed.username), unquote(parsed.password)) - except (AttributeError, TypeError): - auth = ("", "") - - return auth - - -def check_header_validity(header): - """Verifies that header parts don't contain leading whitespace - reserved characters, or return characters. - - :param header: tuple, in the format (name, value). - """ - name, value = header - _validate_header_part(header, name, 0) - _validate_header_part(header, value, 1) - - -def _validate_header_part(header, header_part, header_validator_index): - if isinstance(header_part, str): - validator = _HEADER_VALIDATORS_STR[header_validator_index] - elif isinstance(header_part, bytes): - validator = _HEADER_VALIDATORS_BYTE[header_validator_index] - else: - raise InvalidHeader( - f"Header part ({header_part!r}) from {header} " - f"must be of type str or bytes, not {type(header_part)}" - ) - - if not validator.match(header_part): - header_kind = "name" if header_validator_index == 0 else "value" - raise InvalidHeader( - f"Invalid leading whitespace, reserved character(s), or return" - f"character(s) in header {header_kind}: {header_part!r}" - ) - - -def urldefragauth(url): - """ - Given a url remove the fragment and the authentication part. - - :rtype: str - """ - scheme, netloc, path, params, query, fragment = urlparse(url) - - # see func:`prepend_scheme_if_needed` - if not netloc: - netloc, path = path, netloc - - netloc = netloc.rsplit("@", 1)[-1] - - return urlunparse((scheme, netloc, path, params, query, "")) - - -def rewind_body(prepared_request): - """Move file pointer back to its recorded starting position - so it can be read again on redirect. - """ - body_seek = getattr(prepared_request.body, "seek", None) - if body_seek is not None and isinstance( - prepared_request._body_position, integer_types - ): - try: - body_seek(prepared_request._body_position) - except OSError: - raise UnrewindableBodyError( - "An error occurred when rewinding request body for redirect." - ) - else: - raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/request.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/request.py deleted file mode 100644 index 330766ef4f3403e05a6ad8ec30f25fe05fdbc199..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/request.py +++ /dev/null @@ -1,137 +0,0 @@ -from __future__ import absolute_import - -from base64 import b64encode - -from ..exceptions import UnrewindableBodyError -from ..packages.six import b, integer_types - -# Pass as a value within ``headers`` to skip -# emitting some HTTP headers that are added automatically. -# The only headers that are supported are ``Accept-Encoding``, -# ``Host``, and ``User-Agent``. -SKIP_HEADER = "@@@SKIP_HEADER@@@" -SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"]) - -ACCEPT_ENCODING = "gzip,deflate" - -_FAILEDTELL = object() - - -def make_headers( - keep_alive=None, - accept_encoding=None, - user_agent=None, - basic_auth=None, - proxy_basic_auth=None, - disable_cache=None, -): - """ - Shortcuts for generating request headers. - - :param keep_alive: - If ``True``, adds 'connection: keep-alive' header. - - :param accept_encoding: - Can be a boolean, list, or string. - ``True`` translates to 'gzip,deflate'. 
- List will get joined by comma. - String will be used as provided. - - :param user_agent: - String representing the user-agent you want, such as - "python-urllib3/0.6" - - :param basic_auth: - Colon-separated username:password string for 'authorization: basic ...' - auth header. - - :param proxy_basic_auth: - Colon-separated username:password string for 'proxy-authorization: basic ...' - auth header. - - :param disable_cache: - If ``True``, adds 'cache-control: no-cache' header. - - Example:: - - >>> make_headers(keep_alive=True, user_agent="Batman/1.0") - {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} - >>> make_headers(accept_encoding=True) - {'accept-encoding': 'gzip,deflate'} - """ - headers = {} - if accept_encoding: - if isinstance(accept_encoding, str): - pass - elif isinstance(accept_encoding, list): - accept_encoding = ",".join(accept_encoding) - else: - accept_encoding = ACCEPT_ENCODING - headers["accept-encoding"] = accept_encoding - - if user_agent: - headers["user-agent"] = user_agent - - if keep_alive: - headers["connection"] = "keep-alive" - - if basic_auth: - headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8") - - if proxy_basic_auth: - headers["proxy-authorization"] = "Basic " + b64encode( - b(proxy_basic_auth) - ).decode("utf-8") - - if disable_cache: - headers["cache-control"] = "no-cache" - - return headers - - -def set_file_position(body, pos): - """ - If a position is provided, move file to that point. - Otherwise, we'll attempt to record a position for future use. - """ - if pos is not None: - rewind_body(body, pos) - elif getattr(body, "tell", None) is not None: - try: - pos = body.tell() - except (IOError, OSError): - # This differentiates from None, allowing us to catch - # a failed `tell()` later when trying to rewind the body. - pos = _FAILEDTELL - - return pos - - -def rewind_body(body, body_pos): - """ - Attempt to rewind body to a certain position. - Primarily used for request redirects and retries. - - :param body: - File-like object that supports seek. - - :param int pos: - Position to seek to in file. - """ - body_seek = getattr(body, "seek", None) - if body_seek is not None and isinstance(body_pos, integer_types): - try: - body_seek(body_pos) - except (IOError, OSError): - raise UnrewindableBodyError( - "An error occurred when rewinding request body for redirect/retry." - ) - elif body_pos is _FAILEDTELL: - raise UnrewindableBodyError( - "Unable to record file position for rewinding " - "request body during a redirect/retry." - ) - else: - raise ValueError( - "body_pos must be of type integer, instead it was %s." % type(body_pos) - ) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py deleted file mode 100644 index 7770c922c84fabe0031333a4de305dd6d6852911..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Interface adapters for low-level readers. -""" - -import abc -import io -import itertools -from typing import BinaryIO, List - -from .abc import Traversable, TraversableResources - - -class SimpleReader(abc.ABC): - """ - The minimum, low-level interface required from a resource - provider. 
- """ - - @property - @abc.abstractmethod - def package(self) -> str: - """ - The name of the package for which this reader loads resources. - """ - - @abc.abstractmethod - def children(self) -> List['SimpleReader']: - """ - Obtain an iterable of SimpleReader for available - child containers (e.g. directories). - """ - - @abc.abstractmethod - def resources(self) -> List[str]: - """ - Obtain available named resources for this virtual package. - """ - - @abc.abstractmethod - def open_binary(self, resource: str) -> BinaryIO: - """ - Obtain a File-like for a named resource. - """ - - @property - def name(self): - return self.package.split('.')[-1] - - -class ResourceContainer(Traversable): - """ - Traversable container for a package's resources via its reader. - """ - - def __init__(self, reader: SimpleReader): - self.reader = reader - - def is_dir(self): - return True - - def is_file(self): - return False - - def iterdir(self): - files = (ResourceHandle(self, name) for name in self.reader.resources) - dirs = map(ResourceContainer, self.reader.children()) - return itertools.chain(files, dirs) - - def open(self, *args, **kwargs): - raise IsADirectoryError() - - -class ResourceHandle(Traversable): - """ - Handle to a named resource in a ResourceReader. - """ - - def __init__(self, parent: ResourceContainer, name: str): - self.parent = parent - self.name = name # type: ignore - - def is_file(self): - return True - - def is_dir(self): - return False - - def open(self, mode='r', *args, **kwargs): - stream = self.parent.reader.open_binary(self.name) - if 'b' not in mode: - stream = io.TextIOWrapper(*args, **kwargs) - return stream - - def joinpath(self, name): - raise RuntimeError("Cannot traverse into a resource") - - -class TraversableReader(TraversableResources, SimpleReader): - """ - A TraversableResources based on SimpleReader. Resource providers - may derive from this class to provide the TraversableResources - interface by supplying the SimpleReader interface. 
- """ - - def files(self): - return ResourceContainer(self) diff --git a/spaces/TandCAcceptMe/face-swap-docker/roop/metadata.py b/spaces/TandCAcceptMe/face-swap-docker/roop/metadata.py deleted file mode 100644 index 5fe8e17475d9a05dda9b5ad9d43b6e050e03558f..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/roop/metadata.py +++ /dev/null @@ -1,2 +0,0 @@ -name = 'roop unleashed' -version = '2.6.6' diff --git a/spaces/ThirdEyeData/Customer-Complaints-Categorization/clean_data.py b/spaces/ThirdEyeData/Customer-Complaints-Categorization/clean_data.py deleted file mode 100644 index ac0a4c546c2f364e5b33873301fff2214a1b0831..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Customer-Complaints-Categorization/clean_data.py +++ /dev/null @@ -1,87 +0,0 @@ -import nltk -from nltk.corpus import stopwords -from nltk.stem import WordNetLemmatizer -import warnings -import re -nltk.download("stopwords") -nltk.download("wordnet") -nltk.download("words") -lemmatizer = WordNetLemmatizer() - -stop_words = set(stopwords.words('english')) - -contraction_mapping = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", - "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", - "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", - "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", - "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", - "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", - "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", - "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", - "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", - "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", - "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", - "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", - "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", - "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", - "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", - "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", - "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", - "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", - "why's": "why is", "why've": "why have", "will've": "will 
have", "won't": "will not", "won't've": "will not have", - "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", - "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have", - "you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", - "you're": "you are", "you've": "you have"} - -def cleaned_complaints(text): - import nltk - from nltk.corpus import stopwords - from nltk.stem import WordNetLemmatizer - from nltk.corpus import words - import warnings - import re - - lemmatizer = WordNetLemmatizer() - - stop_words = set(stopwords.words('english')) - - contraction_mapping = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", - "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", - "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", - "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", - "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", - "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", - "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", - "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", - "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", - "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", - "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", - "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", - "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", - "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", - "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", - "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", - "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", - "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", - "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", - "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", - "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have", - "you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will 
have", - "you're": "you are", "you've": "you have"} - - newString=re.sub(r'@[A-Za-z0-9]+','',text) #removing user mentions - newString=re.sub("#","",newString) #removing hashtag symbol - newString= ' '.join([contraction_mapping[t] if t in contraction_mapping else t for t in newString.split(" ")]) #contraction mapping - newString= re.sub(r'http\S+', '', newString) #removing links - newString= re.sub(r"'s\b","",newString) #removing 's - letters_only = re.sub("[^a-zA-Z]", " ", newString) #Fetching out only letters - lower_case = letters_only.lower() #converting all words to lowercase - tokens = [w for w in lower_case.split() if not w in stop_words]#stopwords removal - tokens = [x for x in tokens if x in words.words()] -# tokens= lower_case.split() - newString='' - for i in tokens: - newString=newString+lemmatizer.lemmatize(i)+' ' #converting words using lemmatisation - return newString.strip() - diff --git a/spaces/Um124/Lung_Cancer_Prediction/Lung_Cancer_Prediction.py b/spaces/Um124/Lung_Cancer_Prediction/Lung_Cancer_Prediction.py deleted file mode 100644 index 0754fe693a9890762763285e4dda3dce50ddaa58..0000000000000000000000000000000000000000 --- a/spaces/Um124/Lung_Cancer_Prediction/Lung_Cancer_Prediction.py +++ /dev/null @@ -1,88 +0,0 @@ -import gradio as gr -import pandas as pd -import numpy as np -from joblib import load - -def predict_Level( - Age, Gender, Air_Pollution, Alcohol_use, - Dust_Allergy, OccuPational_Hazards, Genetic_Risk, - chronic_Lung_Disease, Balanced_Diet, Obesity, Smoking, - Passive_Smoker, Chest_Pain, Coughing_of_Blood, Fatigue, - Weight_Loss, Shortness_of_Breath, Wheezing,Swallowing_Difficulty, - Clubbing_of_Finger_Nails, Frequent_Cold,Dry_Cough, Snoring -): - -# load model - model = load('Lung_cancer_prediction.joblib') - - # Create a dict array from the parameters - data = { - 'Age': [Age], - 'Gender': [Gender], - 'Air Pollution': [Air_Pollution], - 'Alcohol use': [Alcohol_use], - 'Dust Allergy': [Dust_Allergy], - 'OccuPational Hazards': [OccuPational_Hazards], - 'Genetic Risk': [Genetic_Risk], - 'chronic Lung Disease': [chronic_Lung_Disease], - 'Balanced Diet': [Balanced_Diet], - 'Obesity': [Obesity], - 'Smoking': [Smoking], - 'Passive Smoker': [Passive_Smoker], - 'Chest Pain': [Chest_Pain], - 'Coughing of Blood': [Coughing_of_Blood], - 'Fatigue': [Fatigue], - 'Weight Loss': [Weight_Loss], - 'Shortness of Breath': [Shortness_of_Breath], - 'Wheezing': [Wheezing], - 'Swallowing Difficulty': [Swallowing_Difficulty], - 'Clubbing of Finger Nails': [Clubbing_of_Finger_Nails], - 'Frequent Cold': [Frequent_Cold], - 'Dry Cough': [Dry_Cough], - 'Snoring': [Snoring], - } - Xinp = pd.DataFrame(data) - print(Xinp) - - # Predict the level - Level = model.predict(Xinp) - - # return the level - return Level[0] - -# Create the gradio interface - -ui = gr.Interface( - fn = predict_Level, - inputs = [ - gr.inputs.Textbox(placeholder='Age', default="33", numeric=True,label='Age'), - gr.inputs.Textbox(placeholder='Gender', default="1", numeric=True,label='Gender'), - gr.inputs.Textbox(placeholder='Air_Pollution', default="2",numeric=True,label='Air Pollution'), - gr.inputs.Textbox(placeholder='Alcohol_use', default="4",numeric=True,label='Alcohol use'), - gr.inputs.Textbox(placeholder='Dust_Allergy', default="5",numeric=True,label='Dust Allergy'), - gr.inputs.Textbox(placeholder='OccuPational_Hazards', default="4",numeric=True,label='OccuPational Hazards'), - gr.inputs.Textbox(placeholder='Genetic_Risk', default="3",numeric=True,label='Genetic Risk'), - 
gr.inputs.Textbox(placeholder='chronic_Lung_Disease', default="2",numeric=True,label='chronic Lung Disease'), - gr.inputs.Textbox(placeholder='Balanced_Diet', default="2",numeric=True,label='Balanced Diet'), - gr.inputs.Textbox(placeholder='Obesity', default="4", numeric=True,label='Obesity'), - gr.inputs.Textbox(placeholder='Smoking', default="3", numeric=True,label='Smoking'), - gr.inputs.Textbox(placeholder='Passive_Smoker', default="2", numeric=True,label='Passive Smoker'), - gr.inputs.Textbox(placeholder='Chest_Pain', default="2", numeric=True,label='Chest Pain'), - gr.inputs.Textbox(placeholder='Coughing_of_Blood', default="4", numeric=True,label='Coughing of Blood'), - gr.inputs.Textbox(placeholder='Fatigue', default="3",numeric=True,label='Fatigue'), - gr.inputs.Textbox(placeholder='Weight_Loss', default="4", numeric=True,label='Weight Loss'), - gr.inputs.Textbox(placeholder='Shortness_of_Breath', default="2", numeric=True,label='Shortness of Breath'), - gr.inputs.Textbox(placeholder='Wheezing', default="2",numeric=True,label='Wheezing'), - gr.inputs.Textbox(placeholder='Swallowing_Difficulty', default="3", numeric=True,label='Swallowing Difficulty'), - gr.inputs.Textbox(placeholder='Clubbing_of_Finger_Nails', default="1", numeric=True,label='Clubbing of Finger Nails'), - gr.inputs.Textbox(placeholder='Frequent_Cold', default="2", numeric=True,label='Frequent Cold'), - gr.inputs.Textbox(placeholder='Dry_Cough', default="3", numeric=True,label='Dry Cough'), - gr.inputs.Textbox(placeholder='Snoring', default="4", numeric=True,label='Snoring'), - - ], - outputs = "text", - -) - -if __name__ == "__main__": - ui.launch(share=False) \ No newline at end of file diff --git a/spaces/VIPLab/Caption-Anything/app.py b/spaces/VIPLab/Caption-Anything/app.py deleted file mode 100644 index 53d8bfb5d2ae3c84cc6c030a77ba5b5f8d885a8f..0000000000000000000000000000000000000000 --- a/spaces/VIPLab/Caption-Anything/app.py +++ /dev/null @@ -1,599 +0,0 @@ -import os -import json -import gradio as gr -import numpy as np -from gradio import processing_utils - -from packaging import version -from PIL import Image, ImageDraw -import functools - -from caption_anything.model import CaptionAnything -from caption_anything.utils.image_editing_utils import create_bubble_frame -from caption_anything.utils.utils import mask_painter, seg_model_map, prepare_segmenter, image_resize -from caption_anything.utils.parser import parse_augment -from caption_anything.captioner import build_captioner -from caption_anything.text_refiner import build_text_refiner -from caption_anything.segmenter import build_segmenter -from caption_anything.utils.chatbot import ConversationBot, build_chatbot_tools, get_new_image_name -from segment_anything import sam_model_registry -import easyocr - -args = parse_augment() -args.segmenter = "huge" -args.segmenter_checkpoint = "sam_vit_h_4b8939.pth" -args.clip_filter = True -if args.segmenter_checkpoint is None: - _, segmenter_checkpoint = prepare_segmenter(args.segmenter) -else: - segmenter_checkpoint = args.segmenter_checkpoint - -shared_captioner = build_captioner(args.captioner, args.device, args) -shared_sam_model = sam_model_registry[seg_model_map[args.segmenter]](checkpoint=segmenter_checkpoint).to(args.device) -ocr_lang = ["ch_tra", "en"] -shared_ocr_reader = easyocr.Reader(ocr_lang) -tools_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.chat_tools_dict.split(',')} -shared_chatbot_tools = build_chatbot_tools(tools_dict) - - -class ImageSketcher(gr.Image): - """ - Fix 
the gradio.Image bug that prevents uploads when tool == 'sketch'. - """ - - is_template = True # Magic to make this work with gradio.Block, don't remove unless you know what you're doing. - - def __init__(self, **kwargs): - super().__init__(tool="sketch", **kwargs) - - def preprocess(self, x): - if self.tool == 'sketch' and self.source in ["upload", "webcam"]: - assert isinstance(x, dict) - if x['mask'] is None: - decode_image = processing_utils.decode_base64_to_image(x['image']) - width, height = decode_image.size - mask = np.zeros((height, width, 4), dtype=np.uint8) - mask[..., -1] = 255 - mask = self.postprocess(mask) - x['mask'] = mask - return super().preprocess(x) - - -def build_caption_anything_with_models(args, api_key="", captioner=None, sam_model=None, ocr_reader=None, text_refiner=None, - session_id=None): - segmenter = build_segmenter(args.segmenter, args.device, args, model=sam_model) - captioner = captioner - if session_id is not None: - print('Init caption anything for session {}'.format(session_id)) - return CaptionAnything(args, api_key, captioner=captioner, segmenter=segmenter, ocr_reader=ocr_reader, text_refiner=text_refiner) - - -def init_openai_api_key(api_key=""): - text_refiner = None - visual_chatgpt = None - if api_key and len(api_key) > 30: - try: - text_refiner = build_text_refiner(args.text_refiner, args.device, args, api_key) - assert len(text_refiner.llm('hi')) > 0 # test - visual_chatgpt = ConversationBot(shared_chatbot_tools, api_key) - except: - text_refiner = None - visual_chatgpt = None - openai_available = text_refiner is not None - if openai_available: - return [gr.update(visible=True)]*6 + [gr.update(visible=False)]*2 + [text_refiner, visual_chatgpt, None] - else: - return [gr.update(visible=False)]*6 + [gr.update(visible=True)]*2 + [text_refiner, visual_chatgpt, 'Your OpenAI API Key is not available'] - -def init_wo_openai_api_key(): - return [gr.update(visible=False)]*4 + [gr.update(visible=True)]*2 + [gr.update(visible=False)]*2 + [None, None, None] - -def get_click_prompt(chat_input, click_state, click_mode): - inputs = json.loads(chat_input) - if click_mode == 'Continuous': - points = click_state[0] - labels = click_state[1] - for input in inputs: - points.append(input[:2]) - labels.append(input[2]) - elif click_mode == 'Single': - points = [] - labels = [] - for input in inputs: - points.append(input[:2]) - labels.append(input[2]) - click_state[0] = points - click_state[1] = labels - else: - raise NotImplementedError - - prompt = { - "prompt_type": ["click"], - "input_point": click_state[0], - "input_label": click_state[1], - "multimask_output": "True", - } - return prompt - - -def update_click_state(click_state, caption, click_mode): - if click_mode == 'Continuous': - click_state[2].append(caption) - elif click_mode == 'Single': - click_state[2] = [caption] - else: - raise NotImplementedError - -def chat_input_callback(*args): - visual_chatgpt, chat_input, click_state, state, aux_state = args - if visual_chatgpt is not None: - return visual_chatgpt.run_text(chat_input, state, aux_state) - else: - response = "Text refiner is not initialized, please input your OpenAI API key." 
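# A sketch of how get_click_prompt() (defined above) accumulates clicks in
# 'Continuous' mode:
#
#   >>> click_state = [[], [], []]
#   >>> get_click_prompt('[[100, 200, 1]]', click_state, 'Continuous')
#   {'prompt_type': ['click'], 'input_point': [[100, 200]], 'input_label': [1], 'multimask_output': 'True'}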
- state = state + [(chat_input, response)] - return state, state - - - -def upload_callback(image_input, state, visual_chatgpt=None): - - if isinstance(image_input, dict): # if upload from sketcher_input, input contains image and mask - image_input, mask = image_input['image'], image_input['mask'] - - click_state = [[], [], []] - image_input = image_resize(image_input, res=1024) - - model = build_caption_anything_with_models( - args, - api_key="", - captioner=shared_captioner, - sam_model=shared_sam_model, - ocr_reader=shared_ocr_reader, - session_id=iface.app_id - ) - model.segmenter.set_image(image_input) - image_embedding = model.image_embedding - original_size = model.original_size - input_size = model.input_size - - if visual_chatgpt is not None: - print('upload_callback: add caption to chatGPT memory') - new_image_path = get_new_image_name('chat_image', func_name='upload') - image_input.save(new_image_path) - visual_chatgpt.current_image = new_image_path - img_caption = model.captioner.inference(image_input, filter=False, args={'text_prompt':''})['caption'] - Human_prompt = f'\nHuman: The description of the image with path {new_image_path} is: {img_caption}. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n' - AI_prompt = "Received." - visual_chatgpt.global_prompt = Human_prompt + 'AI: ' + AI_prompt - visual_chatgpt.agent.memory.buffer = visual_chatgpt.agent.memory.buffer + visual_chatgpt.global_prompt - state = [(None, 'Received new image, resize it to width {} and height {}: '.format(image_input.size[0], image_input.size[1]))] - - return state, state, image_input, click_state, image_input, image_input, image_embedding, \ - original_size, input_size - - -def inference_click(image_input, point_prompt, click_mode, enable_wiki, language, sentiment, factuality, - length, image_embedding, state, click_state, original_size, input_size, text_refiner, visual_chatgpt, - evt: gr.SelectData): - click_index = evt.index - - if point_prompt == 'Positive': - coordinate = "[[{}, {}, 1]]".format(str(click_index[0]), str(click_index[1])) - else: - coordinate = "[[{}, {}, 0]]".format(str(click_index[0]), str(click_index[1])) - - prompt = get_click_prompt(coordinate, click_state, click_mode) - input_points = prompt['input_point'] - input_labels = prompt['input_label'] - - controls = {'length': length, - 'sentiment': sentiment, - 'factuality': factuality, - 'language': language} - - model = build_caption_anything_with_models( - args, - api_key="", - captioner=shared_captioner, - sam_model=shared_sam_model, - ocr_reader=shared_ocr_reader, - text_refiner=text_refiner, - session_id=iface.app_id - ) - - model.setup(image_embedding, original_size, input_size, is_image_set=True) - - enable_wiki = True if enable_wiki in ['True', 'TRUE', 'true', True, 'Yes', 'YES', 'yes'] else False - out = model.inference(image_input, prompt, controls, disable_gpt=True, enable_wiki=enable_wiki, verbose=True, args={'clip_filter': False})[0] - - state = state + [("Image point: {}, Input label: {}".format(prompt["input_point"], prompt["input_label"]), None)] - state = state + [(None, "raw_caption: {}".format(out['generated_captions']['raw_caption']))] - update_click_state(click_state, out['generated_captions']['raw_caption'], click_mode) - text = out['generated_captions']['raw_caption'] - input_mask = np.array(out['mask'].convert('P')) - image_input = mask_painter(np.array(image_input), 
input_mask) - origin_image_input = image_input - image_input = create_bubble_frame(image_input, text, (click_index[0], click_index[1]), input_mask, - input_points=input_points, input_labels=input_labels) - x, y = input_points[-1] - - if visual_chatgpt is not None: - print('inference_click: add caption to chatGPT memory') - new_crop_save_path = get_new_image_name('chat_image', func_name='crop') - Image.open(out["crop_save_path"]).save(new_crop_save_path) - point_prompt = f'You should primarily use tools on the selected regional image (description: {text}, path: {new_crop_save_path}), which is a part of the whole image (path: {visual_chatgpt.current_image}). If the human mentions some objects not in the selected region, you can use tools on the whole image.' - visual_chatgpt.point_prompt = point_prompt - - yield state, state, click_state, image_input - if not args.disable_gpt and model.text_refiner: - refined_caption = model.text_refiner.inference(query=text, controls=controls, context=out['context_captions'], - enable_wiki=enable_wiki) - # new_cap = 'Original: ' + text + '. Refined: ' + refined_caption['caption'] - new_cap = refined_caption['caption'] - if refined_caption['wiki']: - state = state + [(None, "Wiki: {}".format(refined_caption['wiki']))] - state = state + [(None, f"caption: {new_cap}")] - refined_image_input = create_bubble_frame(origin_image_input, new_cap, (click_index[0], click_index[1]), - input_mask, - input_points=input_points, input_labels=input_labels) - yield state, state, click_state, refined_image_input - - -def get_sketch_prompt(mask: Image.Image): - """ - Get the prompt for the sketcher. - TODO: This is a temporary solution. We should cluster the sketch and get the bounding box of each cluster. - """ - - mask = np.asarray(mask)[..., 0] - - # Get the bounding box of the sketch - y, x = np.where(mask != 0) - x1, y1 = np.min(x), np.min(y) - x2, y2 = np.max(x), np.max(y) - - prompt = { - 'prompt_type': ['box'], - 'input_boxes': [ - [x1, y1, x2, y2] - ] - } - - return prompt - - -def inference_traject(sketcher_image, enable_wiki, language, sentiment, factuality, length, image_embedding, state, - original_size, input_size, text_refiner): - image_input, mask = sketcher_image['image'], sketcher_image['mask'] - - prompt = get_sketch_prompt(mask) - boxes = prompt['input_boxes'] - - controls = {'length': length, - 'sentiment': sentiment, - 'factuality': factuality, - 'language': language} - - model = build_caption_anything_with_models( - args, - api_key="", - captioner=shared_captioner, - sam_model=shared_sam_model, - ocr_reader=shared_ocr_reader, - text_refiner=text_refiner, - session_id=iface.app_id - ) - - model.setup(image_embedding, original_size, input_size, is_image_set=True) - - enable_wiki = True if enable_wiki in ['True', 'TRUE', 'true', True, 'Yes', 'YES', 'yes'] else False - out = model.inference(image_input, prompt, controls, disable_gpt=True, enable_wiki=enable_wiki)[0] - - # Update components and states - state.append((f'Box: {boxes}', None)) - state.append((None, f'raw_caption: {out["generated_captions"]["raw_caption"]}')) - text = out['generated_captions']['raw_caption'] - input_mask = np.array(out['mask'].convert('P')) - image_input = mask_painter(np.array(image_input), input_mask) - - origin_image_input = image_input - - fake_click_index = (int((boxes[0][0] + boxes[0][2]) / 2), int((boxes[0][1] + boxes[0][3]) / 2)) - image_input = create_bubble_frame(image_input, text, fake_click_index, input_mask) - - yield state, state, image_input - - if not args.disable_gpt 
and model.text_refiner: - refined_caption = model.text_refiner.inference(query=text, controls=controls, context=out['context_captions'], - enable_wiki=enable_wiki) - - new_cap = refined_caption['caption'] - if refined_caption['wiki']: - state = state + [(None, "Wiki: {}".format(refined_caption['wiki']))] - state = state + [(None, f"caption: {new_cap}")] - refined_image_input = create_bubble_frame(origin_image_input, new_cap, fake_click_index, input_mask) - - yield state, state, refined_image_input - -def clear_chat_memory(visual_chatgpt, keep_global=False): - if visual_chatgpt is not None: - visual_chatgpt.memory.clear() - visual_chatgpt.point_prompt = "" - if keep_global: - visual_chatgpt.agent.memory.buffer = visual_chatgpt.global_prompt - else: - visual_chatgpt.current_image = None - visual_chatgpt.global_prompt = "" - -def cap_everything(image_input, visual_chatgpt, text_refiner): - - model = build_caption_anything_with_models( - args, - api_key="", - captioner=shared_captioner, - sam_model=shared_sam_model, - ocr_reader=shared_ocr_reader, - text_refiner=text_refiner, - session_id=iface.app_id - ) - paragraph = model.inference_cap_everything(image_input, verbose=True) - # state = state + [(None, f"Caption Everything: {paragraph}")] - Human_prompt = f'\nThe description of the image with path {visual_chatgpt.current_image} is:\n{paragraph}\nThis information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n' - AI_prompt = "Received." - visual_chatgpt.global_prompt = Human_prompt + 'AI: ' + AI_prompt - visual_chatgpt.agent.memory.buffer = visual_chatgpt.agent.memory.buffer + visual_chatgpt.global_prompt - return paragraph - - -def get_style(): - current_version = version.parse(gr.__version__) - if current_version <= version.parse('3.24.1'): - style = ''' - #image_sketcher{min-height:500px} - #image_sketcher [data-testid="image"], #image_sketcher [data-testid="image"] > div{min-height: 500px} - #image_upload{min-height:500px} - #image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 500px} - ''' - elif current_version <= version.parse('3.27'): - style = ''' - #image_sketcher{min-height:500px} - #image_upload{min-height:500px} - ''' - else: - style = None - - return style - - -def create_ui(): - title = """

Caption-Anything

- """ - description = """

Gradio demo for Caption Anything, image to dense captioning generation with various language styles. To use it, simply upload your image, or click one of the examples to load it. Code: https://github.com/ttengwang/Caption-Anything

""" - - examples = [ - ["test_images/img35.webp"], - ["test_images/img2.jpg"], - ["test_images/img5.jpg"], - ["test_images/img12.jpg"], - ["test_images/img14.jpg"], - ["test_images/qingming3.jpeg"], - ["test_images/img1.jpg"], - ] - - with gr.Blocks( - css=get_style() - ) as iface: - state = gr.State([]) - click_state = gr.State([[], [], []]) - # chat_state = gr.State([]) - origin_image = gr.State(None) - image_embedding = gr.State(None) - text_refiner = gr.State(None) - visual_chatgpt = gr.State(None) - original_size = gr.State(None) - input_size = gr.State(None) - # img_caption = gr.State(None) - aux_state = gr.State([]) - - gr.Markdown(title) - gr.Markdown(description) - - with gr.Row(): - with gr.Column(scale=1.0): - with gr.Column(visible=False) as modules_not_need_gpt: - with gr.Tab("Click"): - image_input = gr.Image(type="pil", interactive=True, elem_id="image_upload") - example_image = gr.Image(type="pil", interactive=False, visible=False) - with gr.Row(scale=1.0): - with gr.Row(scale=0.4): - point_prompt = gr.Radio( - choices=["Positive", "Negative"], - value="Positive", - label="Point Prompt", - interactive=True) - click_mode = gr.Radio( - choices=["Continuous", "Single"], - value="Continuous", - label="Clicking Mode", - interactive=True) - with gr.Row(scale=0.4): - clear_button_click = gr.Button(value="Clear Clicks", interactive=True) - clear_button_image = gr.Button(value="Clear Image", interactive=True) - with gr.Tab("Trajectory (beta)"): - sketcher_input = ImageSketcher(type="pil", interactive=True, brush_radius=20, - elem_id="image_sketcher") - with gr.Row(): - submit_button_sketcher = gr.Button(value="Submit", interactive=True) - - with gr.Column(visible=False) as modules_need_gpt1: - with gr.Row(scale=1.0): - language = gr.Dropdown( - ['English', 'Chinese', 'French', "Spanish", "Arabic", "Portuguese", "Cantonese"], - value="English", label="Language", interactive=True) - sentiment = gr.Radio( - choices=["Positive", "Natural", "Negative"], - value="Natural", - label="Sentiment", - interactive=True, - ) - with gr.Row(scale=1.0): - factuality = gr.Radio( - choices=["Factual", "Imagination"], - value="Factual", - label="Factuality", - interactive=True, - ) - length = gr.Slider( - minimum=10, - maximum=80, - value=10, - step=1, - interactive=True, - label="Generated Caption Length", - ) - enable_wiki = gr.Radio( - choices=["Yes", "No"], - value="No", - label="Enable Wiki", - interactive=True) - # with gr.Column(visible=True) as modules_not_need_gpt3: - gr.Examples( - examples=examples, - inputs=[example_image], - ) - with gr.Column(scale=0.5): - with gr.Column(visible=True) as module_key_input: - openai_api_key = gr.Textbox( - placeholder="Input openAI API key", - show_label=False, - label="OpenAI API Key", - lines=1, - type="password") - with gr.Row(scale=0.5): - enable_chatGPT_button = gr.Button(value="Run with ChatGPT", interactive=True, variant='primary') - disable_chatGPT_button = gr.Button(value="Run without ChatGPT (Faster)", interactive=True, - variant='primary') - with gr.Column(visible=False) as module_notification_box: - notification_box = gr.Textbox(lines=1, label="Notification", max_lines=5, show_label=False) - with gr.Column(visible=False) as modules_need_gpt2: - paragraph_output = gr.Textbox(lines=7, label="Describe Everything", max_lines=7) - with gr.Column(visible=False) as modules_need_gpt0: - cap_everything_button = gr.Button(value="Caption Everything in a Paragraph", interactive=True) - with gr.Column(visible=False) as modules_not_need_gpt2: - chatbot = 
gr.Chatbot(label="Chatbox", ).style(height=550, scale=0.5) - with gr.Column(visible=False) as modules_need_gpt3: - chat_input = gr.Textbox(show_label=False, placeholder="Enter text and press Enter").style( - container=False) - with gr.Row(): - clear_button_text = gr.Button(value="Clear Text", interactive=True) - submit_button_text = gr.Button(value="Submit", interactive=True, variant="primary") - - openai_api_key.submit(init_openai_api_key, inputs=[openai_api_key], - outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3, modules_not_need_gpt, - modules_not_need_gpt2, module_key_input, module_notification_box, text_refiner, visual_chatgpt, notification_box]) - enable_chatGPT_button.click(init_openai_api_key, inputs=[openai_api_key], - outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3, - modules_not_need_gpt, - modules_not_need_gpt2, module_key_input, module_notification_box, text_refiner, visual_chatgpt, notification_box]) - disable_chatGPT_button.click(init_wo_openai_api_key, - outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3, - modules_not_need_gpt, - modules_not_need_gpt2, module_key_input, module_notification_box, text_refiner, visual_chatgpt, notification_box]) - - enable_chatGPT_button.click( - lambda: (None, [], [], [[], [], []], "", "", ""), - [], - [image_input, chatbot, state, click_state, paragraph_output, origin_image], - queue=False, - show_progress=False - ) - openai_api_key.submit( - lambda: (None, [], [], [[], [], []], "", "", ""), - [], - [image_input, chatbot, state, click_state, paragraph_output, origin_image], - queue=False, - show_progress=False - ) - - cap_everything_button.click(cap_everything, [origin_image, visual_chatgpt, text_refiner], [paragraph_output]) - - clear_button_click.click( - lambda x: ([[], [], []], x), - [origin_image], - [click_state, image_input], - queue=False, - show_progress=False - ) - clear_button_click.click(functools.partial(clear_chat_memory, keep_global=True), inputs=[visual_chatgpt]) - clear_button_image.click( - lambda: (None, [], [], [[], [], []], "", "", ""), - [], - [image_input, chatbot, state, click_state, paragraph_output, origin_image], - queue=False, - show_progress=False - ) - clear_button_image.click(clear_chat_memory, inputs=[visual_chatgpt]) - clear_button_text.click( - lambda: ([], [], [[], [], [], []]), - [], - [chatbot, state, click_state], - queue=False, - show_progress=False - ) - clear_button_text.click(clear_chat_memory, inputs=[visual_chatgpt]) - - image_input.clear( - lambda: (None, [], [], [[], [], []], "", "", ""), - [], - [image_input, chatbot, state, click_state, paragraph_output, origin_image], - queue=False, - show_progress=False - ) - - image_input.clear(clear_chat_memory, inputs=[visual_chatgpt]) - - - image_input.upload(upload_callback, [image_input, state, visual_chatgpt], - [chatbot, state, origin_image, click_state, image_input, sketcher_input, - image_embedding, original_size, input_size]) - sketcher_input.upload(upload_callback, [sketcher_input, state, visual_chatgpt], - [chatbot, state, origin_image, click_state, image_input, sketcher_input, - image_embedding, original_size, input_size]) - chat_input.submit(chat_input_callback, [visual_chatgpt, chat_input, click_state, state, aux_state], - [chatbot, state, aux_state]) - chat_input.submit(lambda: "", None, chat_input) - submit_button_text.click(chat_input_callback, [visual_chatgpt, chat_input, click_state, state, aux_state], - [chatbot, state, aux_state]) - 
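Note that `chat_input` and `submit_button_text` are each deliberately bound twice: once to `chat_input_callback`, and once (just below) to a `lambda: ""` that clears the textbox. Gradio dispatches handlers registered on the same component event in registration order, so the clear runs after the callback has been queued. A minimal self-contained sketch of that pattern — the component and function names here are illustrative, not taken from this app:

```python
import gradio as gr

def echo(message, history):
    # Append the new (user, bot) exchange to the chat history.
    return history + [(message, f"echo: {message}")]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    box = gr.Textbox(placeholder="Enter text and press Enter")
    # Two handlers on one event, executed in registration order:
    box.submit(echo, inputs=[box, chatbot], outputs=[chatbot])
    box.submit(lambda: "", None, box)  # then clear the input box

if __name__ == "__main__":
    demo.launch()
```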
submit_button_text.click(lambda: "", None, chat_input) - example_image.change(upload_callback, [example_image, state, visual_chatgpt], - [chatbot, state, origin_image, click_state, image_input, sketcher_input, - image_embedding, original_size, input_size]) - example_image.change(clear_chat_memory, inputs=[visual_chatgpt]) - # select coordinate - image_input.select( - inference_click, - inputs=[ - origin_image, point_prompt, click_mode, enable_wiki, language, sentiment, factuality, length, - image_embedding, state, click_state, original_size, input_size, text_refiner, visual_chatgpt - ], - outputs=[chatbot, state, click_state, image_input], - show_progress=False, queue=True - ) - - submit_button_sketcher.click( - inference_traject, - inputs=[ - sketcher_input, enable_wiki, language, sentiment, factuality, length, image_embedding, state, - original_size, input_size, text_refiner - ], - outputs=[chatbot, state, sketcher_input], - show_progress=False, queue=True - ) - - return iface - - -if __name__ == '__main__': - iface = create_ui() - iface.queue(concurrency_count=5, api_open=False, max_size=10) - iface.launch(server_name="0.0.0.0", enable_queue=True) diff --git a/spaces/VoiceHero69/changer/webui/modules/__init__.py b/spaces/VoiceHero69/changer/webui/modules/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/XzJosh/Jiaran-Bert-VITS2/data_utils.py b/spaces/XzJosh/Jiaran-Bert-VITS2/data_utils.py deleted file mode 100644 index be3a29a93188c5b3386f22e5db29e5e96d78109a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Jiaran-Bert-VITS2/data_utils.py +++ /dev/null @@ -1,321 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data -import commons -from mel_processing import spectrogram_torch, mel_spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import cleaned_text_to_sequence, get_bert - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths_sid_text, hparams): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - self.spk_map = hparams.spk2id - self.hparams = hparams - - self.use_mel_spec_posterior = getattr(hparams, "use_mel_posterior_encoder", False) - if self.use_mel_spec_posterior: - self.n_mel_channels = getattr(hparams, "n_mel_channels", 80) - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 300) - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - skipped = 0 - for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text: - audiopath = f'{_id}' - if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len: - phones = phones.split(" ") - tone = [int(i) for i in tone.split(" ")] - word2ph = [int(i) for i in word2ph.split(" ")] - audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - else: - skipped += 1 - print("skipped: ", skipped, ", total: ", len(self.audiopaths_sid_text)) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text - - bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath) - - spec, wav = self.get_audio(audiopath) - sid = torch.LongTensor([int(self.spk_map[sid])]) - return (phones, spec, wav, sid, tone, language, bert) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if self.use_mel_spec_posterior: - spec_filename = spec_filename.replace(".spec.pt", ".mel.pt") - try: - spec = torch.load(spec_filename) - except: - if self.use_mel_spec_posterior: - spec = mel_spectrogram_torch(audio_norm, self.filter_length, - self.n_mel_channels, self.sampling_rate, self.hop_length, - self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text, word2ph, phone, tone, language_str, wav_path): - pold = phone - w2pho = [i for i in word2ph] - word2ph = [i for i in word2ph] - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - pold2 = phone - - if 
self.add_blank: - p1 = len(phone) - phone = commons.intersperse(phone, 0) - p2 = len(phone) - t1 = len(tone) - tone = commons.intersperse(tone, 0) - t2 = len(tone) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert_path = wav_path.replace(".wav", ".bert.pt") - try: - bert = torch.load(bert_path) - assert bert.shape[-1] == len(phone) - except: - bert = get_bert(text, word2ph, language_str) - torch.save(bert, bert_path) - #print(bert.shape[-1], bert_path, text, pold) - assert bert.shape[-1] == len(phone) - - assert bert.shape[-1] == len(phone), ( - bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho) - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - return bert, phone, tone, language - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate(): - """ Zero-pads model inputs and targets - """ - - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - tone_padded = torch.LongTensor(len(batch), max_text_len) - language_padded = torch.LongTensor(len(batch), max_text_len) - bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len) - - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - tone_padded.zero_() - language_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - bert_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - tone = row[4] - tone_padded[i, :tone.size(0)] = tone - - language = row[5] - language_padded[i, :language.size(0)] = language - - bert = row[6] - bert_padded[i, :, :bert.size(1)] = bert - - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. 
- - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. - """ - - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - if (len_bucket == 0): - continue - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/XzJosh/Jiaran-Bert-VITS2/text/tone_sandhi.py b/spaces/XzJosh/Jiaran-Bert-VITS2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Jiaran-Bert-VITS2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. 
- # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 
一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/XzJosh/TianDou-Bert-VITS2/bert_gen.py b/spaces/XzJosh/TianDou-Bert-VITS2/bert_gen.py deleted file mode 100644 index 44814715396ffc3abe84a12c74d66293c356eb4f..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/TianDou-Bert-VITS2/bert_gen.py +++ /dev/null @@ -1,53 +0,0 @@ -import torch -from torch.utils.data import DataLoader -from multiprocessing import Pool -import commons -import utils -from data_utils import TextAudioSpeakerLoader, TextAudioSpeakerCollate -from tqdm import tqdm -import warnings - -from text import cleaned_text_to_sequence, get_bert - -config_path = 'configs/config.json' -hps = utils.get_hparams_from_file(config_path) - -def process_line(line): - _id, spk, language_str, text, phones, tone, word2ph = line.strip().split("|") - phone = phones.split(" ") - tone = [int(i) for i in tone.split(" ")] - word2ph = [int(i) for i in word2ph.split(" ")] - w2pho = [i for i in word2ph] - word2ph = [i for i in word2ph] - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - wav_path = f'{_id}' - - bert_path = wav_path.replace(".wav", ".bert.pt") - try: - bert = torch.load(bert_path) - assert bert.shape[-1] == len(phone) - except: - bert = get_bert(text, word2ph, language_str) - assert bert.shape[-1] == len(phone) - torch.save(bert, bert_path) - - -if __name__ == '__main__': - lines = [] - with open(hps.data.training_files, encoding='utf-8' ) as f: - lines.extend(f.readlines()) - - with open(hps.data.validation_files, encoding='utf-8' ) as f: - lines.extend(f.readlines()) - - with Pool(processes=12) as pool: #A100 40GB suitable config,if coom,please decrease the processess number. - for _ in tqdm(pool.imap_unordered(process_line, lines)): - pass diff --git a/spaces/YE01/saya-vits/commons.py b/spaces/YE01/saya-vits/commons.py deleted file mode 100644 index 9ad0444b61cbadaa388619986c2889c707d873ce..0000000000000000000000000000000000000000 --- a/spaces/YE01/saya-vits/commons.py +++ /dev/null @@ -1,161 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py deleted file mode 100644 index da9b324f1582e31d1a16d2fe462ac2989bea56ea..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py +++ /dev/null @@ -1,406 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import os -import sys -import tempfile -from contextlib import ExitStack, contextmanager -from copy import deepcopy -from unittest import mock -import torch -from torch import nn - -# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964 -import detectron2 # noqa F401 -from detectron2.structures import Boxes, Instances -from detectron2.utils.env import _import_file - -_counter = 0 - - -def _clear_jit_cache(): - from torch.jit._recursive import concrete_type_store - from torch.jit._state import _jit_caching_layer - - concrete_type_store.type_store.clear() # for modules - _jit_caching_layer.clear() # for free functions - - -def _add_instances_conversion_methods(newInstances): - """ - Add from_instances methods to the scripted Instances class. - """ - cls_name = newInstances.__name__ - - @torch.jit.unused - def from_instances(instances: Instances): - """ - Create scripted Instances from original Instances - """ - fields = instances.get_fields() - image_size = instances.image_size - ret = newInstances(image_size) - for name, val in fields.items(): - assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}" - setattr(ret, name, deepcopy(val)) - return ret - - newInstances.from_instances = from_instances - - -@contextmanager -def patch_instances(fields): - """ - A contextmanager, under which the Instances class in detectron2 is replaced - by a statically-typed scriptable class, defined by `fields`. - See more in `scripting_with_instances`. - """ - - with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile( - mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False - ) as f: - try: - # Objects that use Instances should not reuse previously-compiled - # results in cache, because `Instances` could be a new class each time. 
- _clear_jit_cache() - - cls_name, s = _gen_instance_module(fields) - f.write(s) - f.flush() - f.close() - - module = _import(f.name) - new_instances = getattr(module, cls_name) - _ = torch.jit.script(new_instances) - # let torchscript think Instances was scripted already - Instances.__torch_script_class__ = True - # let torchscript find new_instances when looking for the jit type of Instances - Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances) - - _add_instances_conversion_methods(new_instances) - yield new_instances - finally: - try: - del Instances.__torch_script_class__ - del Instances._jit_override_qualname - except AttributeError: - pass - sys.modules.pop(module.__name__) - - -def _gen_instance_class(fields): - """ - Args: - fields (dict[name: type]) - """ - - class _FieldType: - def __init__(self, name, type_): - assert isinstance(name, str), f"Field name must be str, got {name}" - self.name = name - self.type_ = type_ - self.annotation = f"{type_.__module__}.{type_.__name__}" - - fields = [_FieldType(k, v) for k, v in fields.items()] - - def indent(level, s): - return " " * 4 * level + s - - lines = [] - - global _counter - _counter += 1 - - cls_name = "ScriptedInstances{}".format(_counter) - - field_names = tuple(x.name for x in fields) - extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields]) - lines.append( - f""" -class {cls_name}: - def __init__(self, image_size: Tuple[int, int], {extra_args}): - self.image_size = image_size - self._field_names = {field_names} -""" - ) - - for f in fields: - lines.append( - indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})") - ) - - for f in fields: - lines.append( - f""" - @property - def {f.name}(self) -> {f.annotation}: - # has to use a local for type refinement - # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement - t = self._{f.name} - assert t is not None, "{f.name} is None and cannot be accessed!" - return t - - @{f.name}.setter - def {f.name}(self, value: {f.annotation}) -> None: - self._{f.name} = value -""" - ) - - # support method `__len__` - lines.append( - """ - def __len__(self) -> int: -""" - ) - for f in fields: - lines.append( - f""" - t = self._{f.name} - if t is not None: - return len(t) -""" - ) - lines.append( - """ - raise NotImplementedError("Empty Instances does not support __len__!") -""" - ) - - # support method `has` - lines.append( - """ - def has(self, name: str) -> bool: -""" - ) - for f in fields: - lines.append( - f""" - if name == "{f.name}": - return self._{f.name} is not None -""" - ) - lines.append( - """ - return False -""" - ) - - # support method `to` - none_args = ", None" * len(fields) - lines.append( - f""" - def to(self, device: torch.device) -> "{cls_name}": - ret = {cls_name}(self.image_size{none_args}) -""" - ) - for f in fields: - if hasattr(f.type_, "to"): - lines.append( - f""" - t = self._{f.name} - if t is not None: - ret._{f.name} = t.to(device) -""" - ) - else: - # For now, ignore fields that cannot be moved to devices. - # Maybe can support other tensor-like classes (e.g. 
__torch_function__) - pass - lines.append( - """ - return ret -""" - ) - - # support method `getitem` - none_args = ", None" * len(fields) - lines.append( - f""" - def __getitem__(self, item) -> "{cls_name}": - ret = {cls_name}(self.image_size{none_args}) -""" - ) - for f in fields: - lines.append( - f""" - t = self._{f.name} - if t is not None: - ret._{f.name} = t[item] -""" - ) - lines.append( - """ - return ret -""" - ) - - # support method `cat` - # this version does not contain checks that all instances have same size and fields - none_args = ", None" * len(fields) - lines.append( - f""" - def cat(self, instances: List["{cls_name}"]) -> "{cls_name}": - ret = {cls_name}(self.image_size{none_args}) -""" - ) - for f in fields: - lines.append( - f""" - t = self._{f.name} - if t is not None: - values: List[{f.annotation}] = [x.{f.name} for x in instances] - if torch.jit.isinstance(t, torch.Tensor): - ret._{f.name} = torch.cat(values, dim=0) - else: - ret._{f.name} = t.cat(values) -""" - ) - lines.append( - """ - return ret""" - ) - - # support method `get_fields()` - lines.append( - """ - def get_fields(self) -> Dict[str, Tensor]: - ret = {} - """ - ) - for f in fields: - if f.type_ == Boxes: - stmt = "t.tensor" - elif f.type_ == torch.Tensor: - stmt = "t" - else: - stmt = f'assert False, "unsupported type {str(f.type_)}"' - lines.append( - f""" - t = self._{f.name} - if t is not None: - ret["{f.name}"] = {stmt} - """ - ) - lines.append( - """ - return ret""" - ) - return cls_name, os.linesep.join(lines) - - -def _gen_instance_module(fields): - # TODO: find a more automatic way to enable import of other classes - s = """ -from copy import deepcopy -import torch -from torch import Tensor -import typing -from typing import * - -import detectron2 -from detectron2.structures import Boxes, Instances - -""" - - cls_name, cls_def = _gen_instance_class(fields) - s += cls_def - return cls_name, s - - -def _import(path): - return _import_file( - "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True - ) - - -@contextmanager -def patch_builtin_len(modules=()): - """ - Patch the builtin len() function of a few detectron2 modules - to use __len__ instead, because __len__ does not convert values to - integers and therefore is friendly to tracing. - - Args: - modules (list[stsr]): names of extra modules to patch len(), in - addition to those in detectron2. - """ - - def _new_len(obj): - return obj.__len__() - - with ExitStack() as stack: - MODULES = [ - "detectron2.modeling.roi_heads.fast_rcnn", - "detectron2.modeling.roi_heads.mask_head", - "detectron2.modeling.roi_heads.keypoint_head", - ] + list(modules) - ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES] - for m in ctxs: - m.side_effect = _new_len - yield - - -def patch_nonscriptable_classes(): - """ - Apply patches on a few nonscriptable detectron2 classes. - Should not have side-effects on eager usage. - """ - # __prepare_scriptable__ can also be added to models for easier maintenance. - # But it complicates the clean model code. - - from detectron2.modeling.backbone import ResNet, FPN - - # Due to https://github.com/pytorch/pytorch/issues/36061, - # we change backbone to use ModuleList for scripting. 
- # (note: this changes param names in state_dict) - - def prepare_resnet(self): - ret = deepcopy(self) - ret.stages = nn.ModuleList(ret.stages) - for k in self.stage_names: - delattr(ret, k) - return ret - - ResNet.__prepare_scriptable__ = prepare_resnet - - def prepare_fpn(self): - ret = deepcopy(self) - ret.lateral_convs = nn.ModuleList(ret.lateral_convs) - ret.output_convs = nn.ModuleList(ret.output_convs) - for name, _ in self.named_children(): - if name.startswith("fpn_"): - delattr(ret, name) - return ret - - FPN.__prepare_scriptable__ = prepare_fpn - - # Annotate some attributes to be constants for the purpose of scripting, - # even though they are not constants in eager mode. - from detectron2.modeling.roi_heads import StandardROIHeads - - if hasattr(StandardROIHeads, "__annotations__"): - # copy first to avoid editing annotations of base class - StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__) - StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool] - StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool] - - -# These patches are not supposed to have side-effects. -patch_nonscriptable_classes() - - -@contextmanager -def freeze_training_mode(model): - """ - A context manager that annotates the "training" attribute of every submodule - to constant, so that the training codepath in these modules can be - meta-compiled away. Upon exiting, the annotations are reverted. - """ - classes = {type(x) for x in model.modules()} - # __constants__ is the old way to annotate constants and not compatible - # with __annotations__ . - classes = {x for x in classes if not hasattr(x, "__constants__")} - for cls in classes: - cls.__annotations__["training"] = torch.jit.Final[bool] - yield - for cls in classes: - cls.__annotations__["training"] = bool diff --git a/spaces/aadnk/whisper-webui/src/prompts/jsonPromptStrategy.py b/spaces/aadnk/whisper-webui/src/prompts/jsonPromptStrategy.py deleted file mode 100644 index 25aa938adc3c0d5776cd11e0d123195bb6e69aeb..0000000000000000000000000000000000000000 --- a/spaces/aadnk/whisper-webui/src/prompts/jsonPromptStrategy.py +++ /dev/null @@ -1,49 +0,0 @@ -import json -from typing import Dict -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy - - -class JsonPromptSegment(): - def __init__(self, segment_index: int, prompt: str, format_prompt: bool = False): - self.prompt = prompt - self.segment_index = segment_index - self.format_prompt = format_prompt - -class JsonPromptStrategy(AbstractPromptStrategy): - def __init__(self, initial_json_prompt: str): - """ - Parameters - ---------- - initial_json_prompt: str - The initial prompts for each segment in JSON form. 
- - Format: - [ - {"segment_index": 0, "prompt": "Hello, how are you?"}, - {"segment_index": 1, "prompt": "I'm doing well, how are you?"}, - {"segment_index": 2, "prompt": "{0} Fine, thank you.", "format_prompt": true} - ] - - """ - parsed_json = json.loads(initial_json_prompt) - self.segment_lookup: Dict[str, JsonPromptSegment] = dict() - - for prompt_entry in parsed_json: - segment_index = prompt_entry["segment_index"] - prompt = prompt_entry["prompt"] - format_prompt = prompt_entry.get("format_prompt", False) - self.segment_lookup[str(segment_index)] = JsonPromptSegment(segment_index, prompt, format_prompt) - - def get_segment_prompt(self, segment_index: int, whisper_prompt: str, detected_language: str) -> str: - # Lookup prompt - prompt = self.segment_lookup.get(str(segment_index), None) - - if (prompt is None): - # No prompt found, return whisper prompt - print(f"Could not find prompt for segment {segment_index}, returning whisper prompt") - return whisper_prompt - - if (prompt.format_prompt): - return prompt.prompt.format(whisper_prompt) - else: - return self._concat_prompt(prompt.prompt, whisper_prompt) diff --git a/spaces/aaronayitey/Streamlit-app/README.md b/spaces/aaronayitey/Streamlit-app/README.md deleted file mode 100644 index f15d40b9a0263e5d85a17f4e0ace478fa18f308b..0000000000000000000000000000000000000000 --- a/spaces/aaronayitey/Streamlit-app/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Streamlit App -emoji: 🏆 -colorFrom: gray -colorTo: red -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/assigners/max_iou_assigner.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/assigners/max_iou_assigner.py deleted file mode 100644 index 5cf4c4b4b450f87dfb99c3d33d8ed83d3e5cfcb3..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/assigners/max_iou_assigner.py +++ /dev/null @@ -1,212 +0,0 @@ -import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class MaxIoUAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with `-1`, or a semi-positive integer - indicating the ground truth index. - - - -1: negative sample, no assigned gt - - semi-positive integer: positive sample, index (0-based) of assigned gt - - Args: - pos_iou_thr (float): IoU threshold for positive bboxes. - neg_iou_thr (float or tuple): IoU threshold for negative bboxes. - min_pos_iou (float): Minimum iou for a bbox to be considered as a - positive bbox. Positive samples can have smaller IoU than - pos_iou_thr due to the 4th step (assign max IoU sample to each gt). - gt_max_assign_all (bool): Whether to assign all bboxes with the same - highest overlap with some gt to that gt. - ignore_iof_thr (float): IoF threshold for ignoring bboxes (if - `gt_bboxes_ignore` is specified). Negative values mean not - ignoring any bboxes. - ignore_wrt_candidates (bool): Whether to compute the iof between - `bboxes` and `gt_bboxes_ignore`, or the contrary. - match_low_quality (bool): Whether to allow low quality matches. 
This is - usually allowed for RPN and single stage detectors, but not allowed - in the second stage. Details are demonstrated in Step 4. - gpu_assign_thr (int): The upper bound of the number of GT for GPU - assign. When the number of gt is above this threshold, will assign - on CPU device. Negative values mean not assign on CPU. - """ - - def __init__(self, - pos_iou_thr, - neg_iou_thr, - min_pos_iou=.0, - gt_max_assign_all=True, - ignore_iof_thr=-1, - ignore_wrt_candidates=True, - match_low_quality=True, - gpu_assign_thr=-1, - iou_calculator=dict(type='BboxOverlaps2D')): - self.pos_iou_thr = pos_iou_thr - self.neg_iou_thr = neg_iou_thr - self.min_pos_iou = min_pos_iou - self.gt_max_assign_all = gt_max_assign_all - self.ignore_iof_thr = ignore_iof_thr - self.ignore_wrt_candidates = ignore_wrt_candidates - self.gpu_assign_thr = gpu_assign_thr - self.match_low_quality = match_low_quality - self.iou_calculator = build_iou_calculator(iou_calculator) - - def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): - """Assign gt to bboxes. - - This method assign a gt bbox to every bbox (proposal/anchor), each bbox - will be assigned with -1, or a semi-positive number. -1 means negative - sample, semi-positive number is the index (0-based) of assigned gt. - The assignment is done in following steps, the order matters. - - 1. assign every bbox to the background - 2. assign proposals whose iou with all gts < neg_iou_thr to 0 - 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, - assign it to that bbox - 4. for each gt bbox, assign its nearest proposals (may be more than - one) to itself - - Args: - bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. 
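-
-        Note:
-            If `gpu_assign_thr` is set to a positive value and the number of
-            gts exceeds it, the overlaps are computed and the assignment is
-            done on CPU to save GPU memory; the result is moved back to the
-            original device afterwards.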
- - Example: - >>> self = MaxIoUAssigner(0.5, 0.5) - >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]]) - >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]]) - >>> assign_result = self.assign(bboxes, gt_bboxes) - >>> expected_gt_inds = torch.LongTensor([1, 0]) - >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) - """ - assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( - gt_bboxes.shape[0] > self.gpu_assign_thr) else False - # compute overlap and assign gt on CPU when number of GT is large - if assign_on_cpu: - device = bboxes.device - bboxes = bboxes.cpu() - gt_bboxes = gt_bboxes.cpu() - if gt_bboxes_ignore is not None: - gt_bboxes_ignore = gt_bboxes_ignore.cpu() - if gt_labels is not None: - gt_labels = gt_labels.cpu() - - overlaps = self.iou_calculator(gt_bboxes, bboxes) - - if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None - and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): - if self.ignore_wrt_candidates: - ignore_overlaps = self.iou_calculator( - bboxes, gt_bboxes_ignore, mode='iof') - ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) - else: - ignore_overlaps = self.iou_calculator( - gt_bboxes_ignore, bboxes, mode='iof') - ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) - overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 - - assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) - if assign_on_cpu: - assign_result.gt_inds = assign_result.gt_inds.to(device) - assign_result.max_overlaps = assign_result.max_overlaps.to(device) - if assign_result.labels is not None: - assign_result.labels = assign_result.labels.to(device) - return assign_result - - def assign_wrt_overlaps(self, overlaps, gt_labels=None): - """Assign w.r.t. the overlaps of bboxes with gts. - - Args: - overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, - shape(k, n). - gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. - """ - num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) - - # 1. assign -1 by default - assigned_gt_inds = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = overlaps.new_zeros((num_bboxes, )) - if num_gts == 0: - # No truth, assign everything to background - assigned_gt_inds[:] = 0 - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gts, - assigned_gt_inds, - max_overlaps, - labels=assigned_labels) - - # for each anchor, which gt best overlaps with it - # for each anchor, the max iou of all gts - max_overlaps, argmax_overlaps = overlaps.max(dim=0) - # for each gt, which anchor best overlaps with it - # for each gt, the max iou of all proposals - gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) - - # 2. assign negative: below - # the negative inds are set to be 0 - if isinstance(self.neg_iou_thr, float): - assigned_gt_inds[(max_overlaps >= 0) - & (max_overlaps < self.neg_iou_thr)] = 0 - elif isinstance(self.neg_iou_thr, tuple): - assert len(self.neg_iou_thr) == 2 - assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) - & (max_overlaps < self.neg_iou_thr[1])] = 0 - - # 3. 
assign positive: above positive IoU threshold
-        pos_inds = max_overlaps >= self.pos_iou_thr
-        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
-
-        if self.match_low_quality:
-            # Low-quality matching will overwrite the assigned_gt_inds assigned
-            # in Step 3. Thus, the assigned gt might not be the best one for
-            # prediction.
-            # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
-            # GT bbox 1 will be assigned as the best target for bbox A in step 3.
-            # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's
-            # assigned_gt_inds will be overwritten to be GT bbox 2.
-            # This might be the reason that it is not used in ROI Heads.
-            for i in range(num_gts):
-                if gt_max_overlaps[i] >= self.min_pos_iou:
-                    if self.gt_max_assign_all:
-                        max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
-                        assigned_gt_inds[max_iou_inds] = i + 1
-                    else:
-                        assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
-
-        if gt_labels is not None:
-            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
-            pos_inds = torch.nonzero(
-                assigned_gt_inds > 0, as_tuple=False).squeeze()
-            if pos_inds.numel() > 0:
-                assigned_labels[pos_inds] = gt_labels[
-                    assigned_gt_inds[pos_inds] - 1]
-        else:
-            assigned_labels = None
-
-        return AssignResult(
-            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/assigners/center_region_assigner.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/assigners/center_region_assigner.py
deleted file mode 100644
index 488e3b615318787751cab3211e38dd9471c666be..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/assigners/center_region_assigner.py
+++ /dev/null
@@ -1,335 +0,0 @@
-import torch
-
-from ..builder import BBOX_ASSIGNERS
-from ..iou_calculators import build_iou_calculator
-from .assign_result import AssignResult
-from .base_assigner import BaseAssigner
-
-
-def scale_boxes(bboxes, scale):
-    """Expand an array of boxes by a given scale.
-
-    Args:
-        bboxes (Tensor): Shape (m, 4)
-        scale (float): The scale factor of bboxes
-
-    Returns:
-        (Tensor): Shape (m, 4). Scaled bboxes
-    """
-    assert bboxes.size(1) == 4
-    w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5
-    h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5
-    x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5
-    y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5
-
-    w_half *= scale
-    h_half *= scale
-
-    boxes_scaled = torch.zeros_like(bboxes)
-    boxes_scaled[:, 0] = x_c - w_half
-    boxes_scaled[:, 2] = x_c + w_half
-    boxes_scaled[:, 1] = y_c - h_half
-    boxes_scaled[:, 3] = y_c + h_half
-    return boxes_scaled
-
-
-def is_located_in(points, bboxes):
-    """Are points located in bboxes.
-
-    Args:
-        points (Tensor): Points, shape: (m, 2).
-        bboxes (Tensor): Bounding boxes, shape: (n, 4).
-
-    Return:
-        Tensor: Flags indicating if points are located in bboxes, shape: (m, n).
-    """
-    assert points.size(1) == 2
-    assert bboxes.size(1) == 4
-    return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \
-        (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \
-        (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \
-        (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0))
-
-
-def bboxes_area(bboxes):
-    """Compute the area of an array of bboxes.
-
-    Args:
-        bboxes (Tensor): The coordinates of bboxes. Shape: (m, 4)
-
-    Returns:
-        Tensor: Area of the bboxes. Shape: (m, )
-    """
-    assert bboxes.size(1) == 4
-    w = (bboxes[:, 2] - bboxes[:, 0])
-    h = (bboxes[:, 3] - bboxes[:, 1])
-    areas = w * h
-    return areas
-
-
-@BBOX_ASSIGNERS.register_module()
-class CenterRegionAssigner(BaseAssigner):
-    """Assign pixels at the center region of a bbox as positive.
-
-    Each proposal will be assigned with `-1`, `0`, or a positive integer
-    indicating the ground truth index.
-
-    - -1: negative samples
-    - semi-positive numbers: positive sample, index (0-based) of assigned gt
-
-    Args:
-        pos_scale (float): Threshold within which pixels are
-            labelled as positive.
-        neg_scale (float): Threshold above which pixels are
-            labelled as negative.
-        min_pos_iof (float): Minimum iof of a pixel with a gt to be
-            labelled as positive. Default: 1e-2
-        ignore_gt_scale (float): Threshold within which the pixels
-            are ignored when the gt is labelled as shadowed. Default: 0.5
-        foreground_dominate (bool): If True, the bbox will be assigned as
-            positive when a gt's kernel region overlaps with another's shadowed
-            (ignored) region, otherwise it is set as ignored. Defaults to False.
-    """
-
-    def __init__(self,
-                 pos_scale,
-                 neg_scale,
-                 min_pos_iof=1e-2,
-                 ignore_gt_scale=0.5,
-                 foreground_dominate=False,
-                 iou_calculator=dict(type='BboxOverlaps2D')):
-        self.pos_scale = pos_scale
-        self.neg_scale = neg_scale
-        self.min_pos_iof = min_pos_iof
-        self.ignore_gt_scale = ignore_gt_scale
-        self.foreground_dominate = foreground_dominate
-        self.iou_calculator = build_iou_calculator(iou_calculator)
-
-    def get_gt_priorities(self, gt_bboxes):
-        """Get gt priorities according to their areas.
-
-        Smaller gts have higher priority.
-
-        Args:
-            gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).
-
-        Returns:
-            Tensor: The priority of gts so that gts with larger priority are \
-                more likely to be assigned. Shape (k, )
-        """
-        gt_areas = bboxes_area(gt_bboxes)
-        # Rank all gt bbox areas. Smaller objects have larger priority
-        _, sort_idx = gt_areas.sort(descending=True)
-        sort_idx = sort_idx.argsort()
-        return sort_idx
-
-    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
-        """Assign gt to bboxes.
-
-        This method assigns gts to every bbox (proposal/anchor), each bbox \
-        will be assigned with -1, or a semi-positive number. -1 means \
-        negative sample, semi-positive number is the index (0-based) of \
-        assigned gt.
-
-        Args:
-            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
-            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
-            gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are
-                labelled as `ignored`, e.g., crowd boxes in COCO.
-            gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,).
-
-        Returns:
-            :obj:`AssignResult`: The assigned result. Note that \
-                shadowed_labels of shape (N, 2) is also added as an \
-                `assign_result` attribute. `shadowed_labels` is a tensor \
-                composed of N pairs of [anchor_ind, class_label], where N \
-                is the number of anchors that lie in the outer region of a \
-                gt, anchor_ind is the shadowed anchor index and class_label \
-                is the shadowed class label.
-
-        Example:
-            >>> self = CenterRegionAssigner(0.2, 0.2)
-            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
-            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
-            >>> assign_result = self.assign(bboxes, gt_bboxes)
-            >>> expected_gt_inds = torch.LongTensor([1, 0])
-            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
-        """
-        # There are in total 5 steps in the pixel assignment
-        # 1. Find core (the center region, say inner 0.2)
-        #   and shadow (the relatively outer part, say inner 0.2-0.5)
-        #   regions of every gt.
-        # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions
-        # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in
-        #   the image.
-        # 3.1. For overlapping objects, the prior bboxes in gt_core are
-        #   assigned with the object with smallest area
-        # 4. Assign prior bboxes with class label according to its gt id.
-        # 4.1. Assign -1 to prior bboxes lying in shadowed gts
-        # 4.2. Assign positive prior boxes with the corresponding label
-        # 5. Find pixels lying in the shadow of an object and assign them with
-        #   background label, but set the loss weight of its corresponding
-        #   gt to zero.
-        assert bboxes.size(1) == 4, 'bboxes must have size of 4'
-        # 1. Find core positive and shadow region of every gt
-        gt_core = scale_boxes(gt_bboxes, self.pos_scale)
-        gt_shadow = scale_boxes(gt_bboxes, self.neg_scale)
-
-        # 2. Find prior bboxes that lie in gt_core and gt_shadow regions
-        bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2
-        # The center points lie within the gt boxes
-        is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes)
-        # Only calculate bbox and gt_core IoF. This enables small prior bboxes
-        #   to match large gts
-        bbox_and_gt_core_overlaps = self.iou_calculator(
-            bboxes, gt_core, mode='iof')
-        # The center point of effective priors should be within the gt box
-        is_bbox_in_gt_core = is_bbox_in_gt & (
-            bbox_and_gt_core_overlaps > self.min_pos_iof)  # shape (n, k)
-
-        is_bbox_in_gt_shadow = (
-            self.iou_calculator(bboxes, gt_shadow, mode='iof') >
-            self.min_pos_iof)
-        # Rule out center effective positive pixels
-        is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core)
-
-        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
-        if num_gts == 0 or num_bboxes == 0:
-            # If no gts exist, assign all pixels to negative
-            assigned_gt_ids = \
-                is_bbox_in_gt_core.new_zeros((num_bboxes,),
-                                             dtype=torch.long)
-            pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2))
-        else:
-            # Step 3: assign a one-hot gt id to each pixel, and smaller objects
-            #    have high priority to assign the pixel.
-            sort_idx = self.get_gt_priorities(gt_bboxes)
-            assigned_gt_ids, pixels_in_gt_shadow = \
-                self.assign_one_hot_gt_indices(is_bbox_in_gt_core,
-                                               is_bbox_in_gt_shadow,
-                                               gt_priority=sort_idx)
-
-        if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0:
-            # Scale the ignored gt boxes and mark priors whose centers fall
-            #   inside them as ignored (-1)
-            gt_bboxes_ignore = scale_boxes(
-                gt_bboxes_ignore, scale=self.ignore_gt_scale)
-            is_bbox_in_ignored_gts = is_located_in(bbox_centers,
-                                                   gt_bboxes_ignore)
-            is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1)
-            assigned_gt_ids[is_bbox_in_ignored_gts] = -1
-
-        # 4. Assign prior bboxes with class label according to its gt id.
-        assigned_labels = None
-        shadowed_pixel_labels = None
-        if gt_labels is not None:
-            # Default assigned label is the background (-1)
-            assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1)
-            pos_inds = torch.nonzero(
-                assigned_gt_ids > 0, as_tuple=False).squeeze()
-            if pos_inds.numel() > 0:
-                assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds]
-                                                      - 1]
-            # 5. Find pixels lying in the shadow of an object
-            shadowed_pixel_labels = pixels_in_gt_shadow.clone()
-            if pixels_in_gt_shadow.numel() > 0:
-                pixel_idx, gt_idx =\
-                    pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1]
-                assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \
-                    'Some pixels are dually assigned to ignore and gt!'
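-                # A pixel may be positive for one gt while lying in the shadow
-                # of another gt of the same class; the `override` mask below
-                # resolves that clash according to `foreground_dominate`.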
-                shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1]
-                override = (
-                    assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1])
-                if self.foreground_dominate:
-                    # When a pixel is both positive and shadowed, set it as pos
-                    shadowed_pixel_labels = shadowed_pixel_labels[~override]
-                else:
-                    # When a pixel is both pos and shadowed, set it as shadowed
-                    assigned_labels[pixel_idx[override]] = -1
-                    assigned_gt_ids[pixel_idx[override]] = 0
-
-        assign_result = AssignResult(
-            num_gts, assigned_gt_ids, None, labels=assigned_labels)
-        # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2)
-        assign_result.set_extra_property('shadowed_labels',
-                                         shadowed_pixel_labels)
-        return assign_result
-
-    def assign_one_hot_gt_indices(self,
-                                  is_bbox_in_gt_core,
-                                  is_bbox_in_gt_shadow,
-                                  gt_priority=None):
-        """Assign only one gt index to each prior box.
-
-        Gts with large gt_priority are more likely to be assigned.
-
-        Args:
-            is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center
-                is in the core area of a gt (e.g. 0-0.2).
-                Shape: (num_prior, num_gt).
-            is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox
-                center is in the shadowed area of a gt (e.g. 0.2-0.5).
-                Shape: (num_prior, num_gt).
-            gt_priority (Tensor): Priorities of gts. The gt with a higher
-                priority is more likely to be assigned to the bbox when the
-                bbox matches multiple gts. Shape: (num_gt, ).
-
-        Returns:
-            tuple: Returns (assigned_gt_inds, shadowed_gt_inds).
-
-                - assigned_gt_inds: The assigned gt index of each prior bbox \
-                    (i.e. index from 1 to num_gts). Shape: (num_prior, ).
-                - shadowed_gt_inds: shadowed gt indices. It is a tensor of \
-                    shape (num_ignore, 2) with first column being the \
-                    shadowed prior bbox indices and the second column the \
-                    shadowed gt indices (1-based).
-        """
-        num_bboxes, num_gts = is_bbox_in_gt_core.shape
-
-        if gt_priority is None:
-            gt_priority = torch.arange(
-                num_gts, device=is_bbox_in_gt_core.device)
-        assert gt_priority.size(0) == num_gts
-        # The bigger gt_priority, the more preferable to be assigned
-        # The assigned inds are by default 0 (background)
-        assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ),
-                                                        dtype=torch.long)
-        # Shadowed bboxes are assigned to be background. But the corresponding
-        #   label is ignored during loss calculation, which is done through
-        #   shadowed_gt_inds
-        shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False)
-        if is_bbox_in_gt_core.sum() == 0:  # No gt match
-            shadowed_gt_inds[:, 1] += 1  # 1-based, for consistency
-            return assigned_gt_inds, shadowed_gt_inds
-
-        # The priority of each prior box and gt pair. If one prior box is
-        #   matched to multiple gts, only the pair with the highest priority
-        #   is saved
-        pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts),
-                                                    -1,
-                                                    dtype=torch.long)
-
-        # Each bbox could match with multiple gts.
-        # The following code deals with this situation
-        # Matched bboxes (to any gt). Shape: (num_pos_anchor, )
-        inds_of_match = torch.any(is_bbox_in_gt_core, dim=1)
-        # The matched gt index of each positive bbox. Length >= num_pos_anchor
-        #   , since one bbox could match multiple gts
-        matched_bbox_gt_inds = torch.nonzero(
-            is_bbox_in_gt_core, as_tuple=False)[:, 1]
-        # Assign priority to each bbox-gt pair.
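-        # For every matched prior, keep only the gt with the highest priority
-        # (i.e. the smallest area): the argmax over pair priorities below
-        # selects exactly one gt per prior.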
- pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds] - _, argmax_priority = pair_priority[inds_of_match].max(dim=1) - assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based - # Zero-out the assigned anchor box to filter the shadowed gt indices - is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0 - # Concat the shadowed indices due to overlapping with that out side of - # effective scale. shape: (total_num_ignore, 2) - shadowed_gt_inds = torch.cat( - (shadowed_gt_inds, torch.nonzero( - is_bbox_in_gt_core, as_tuple=False)), - dim=0) - # `is_bbox_in_gt_core` should be changed back to keep arguments intact. - is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1 - # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds` - if shadowed_gt_inds.numel() > 0: - shadowed_gt_inds[:, 1] += 1 - return assigned_gt_inds, shadowed_gt_inds diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/__init__.py deleted file mode 100644 index bbe70145b8bf7c304370f725f5afa8db98666679..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .res_layer import ResLayer - -__all__ = ['ResLayer'] diff --git a/spaces/abidlabs/supabase/app.py b/spaces/abidlabs/supabase/app.py deleted file mode 100644 index 576a04470a48ed4fc6585c1de229538412d42589..0000000000000000000000000000000000000000 --- a/spaces/abidlabs/supabase/app.py +++ /dev/null @@ -1,19 +0,0 @@ -import supabase -import pandas as pd -import os - -client = supabase.create_client(os.environ['SUPABASE_URL'], os.environ['SUPABASE_SECRET_KEY']) - -def read_data(): - response = client.table('Product').select("*").execute() - df = pd.DataFrame(response.data) - return df - -import gradio as gr - -with gr.Blocks() as dashboard: - with gr.Row(): - gr.BarPlot(read_data, x="product_id", y="price", title="Prices", every=60) - gr.BarPlot(read_data, x="product_id", y="inventory_count", title="Inventory", every=60) - -dashboard.queue().launch() diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/font/__init__.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/font/__init__.py deleted file mode 100644 index 78d7f002536cec3cd23c449b01cd70ac93068d6c..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/font/__init__.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Load fonts. - -pyglet will automatically load any system-installed fonts. You can add additional fonts -(for example, from your program resources) using :meth:`add_file` or -:meth:`add_directory`. These fonts are then available in the same way as system-installed fonts:: - - from pyglet import font - font.add_file('action_man.ttf') - action_man = font.load('Action Man', 16) - # or - from pyglet import resource - resource.add_font('action_man.ttf') - action_man = font.load('Action Man') - -See the :mod:`pyglet.font.base` module for documentation on the base classes used -by this package. 
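-
-A quick availability check before loading (a usage sketch built on the
-:py:func:`add_file`, :py:func:`have_font` and :py:func:`load` helpers
-defined below)::
-
-    from pyglet import font
-    font.add_file('action_man.ttf')
-    if font.have_font('Action Man'):
-        action_man = font.load('Action Man', 16, bold=True)
-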
-""" - -import os -import sys -import weakref - -import pyglet -from pyglet import gl - - -if not getattr(sys, 'is_pyglet_doc_run', False): - if pyglet.compat_platform == 'darwin': - from pyglet.font.quartz import QuartzFont - _font_class = QuartzFont - - elif pyglet.compat_platform in ('win32', 'cygwin'): - from pyglet.libs.win32.constants import WINDOWS_7_OR_GREATER - if WINDOWS_7_OR_GREATER and not pyglet.options['win32_gdi_font']: - from pyglet.font.directwrite import Win32DirectWriteFont - _font_class = Win32DirectWriteFont - else: - from pyglet.font.win32 import GDIPlusFont - _font_class = GDIPlusFont - - else: - from pyglet.font.freetype import FreeTypeFont - _font_class = FreeTypeFont - - -def have_font(name): - """Check if specified system font name is available.""" - return _font_class.have_font(name) - - -def load(name=None, size=None, bold=False, italic=False, stretch=False, dpi=None): - """Load a font for rendering. - - :Parameters: - `name` : str, or list of str - Font family, for example, "Times New Roman". If a list of names - is provided, the first one matching a known font is used. If no - font can be matched to the name(s), a default font is used. In - pyglet 1.1, the name may be omitted. - `size` : float - Size of the font, in points. The returned font may be an exact - match or the closest available. - `bold` : bool - If True, a bold variant is returned, if one exists for the given - family and size. - `italic` : bool - If True, an italic variant is returned, if one exists for the given - family and size. - `dpi` : float - The assumed resolution of the display device, for the purposes of - determining the pixel size of the font. Defaults to 96. - - :rtype: `Font` - """ - # Arbitrary default size - if size is None: - size = 12 - - if dpi is None: - dpi = 96 - - # Locate or create font cache - shared_object_space = gl.current_context.object_space - if not hasattr(shared_object_space, 'pyglet_font_font_cache'): - shared_object_space.pyglet_font_font_cache = weakref.WeakValueDictionary() - shared_object_space.pyglet_font_font_hold = [] - shared_object_space.pyglet_font_font_name_match = {} # Match a tuple to specific name to reduce lookups. - - font_cache = shared_object_space.pyglet_font_font_cache - font_hold = shared_object_space.pyglet_font_font_hold - font_name_match = shared_object_space.pyglet_font_font_name_match - - name_type = type(name) - if name_type in (tuple, list): - if name_type == list: - name = tuple(name) - - if name in font_name_match: - name = font_name_match[name] - else: - # Find first matching name, cache it. - found_name = None - for n in name: - if _font_class.have_font(n): - found_name = n - break - - font_name_match[name] = found_name - name = found_name - - # Look for font name in font cache - descriptor = (name, size, bold, italic, stretch, dpi) - if descriptor in font_cache: - return font_cache[descriptor] - - # Not in cache, create from scratch - font = _font_class(name, size, bold=bold, italic=italic, stretch=stretch, dpi=dpi) - - # Save parameters for new-style layout classes to recover - # TODO: add properties to the Font classes, so these can be queried: - font.size = size - font.bold = bold - font.italic = italic - font.stretch = stretch - font.dpi = dpi - - # Cache font in weak-ref dictionary to avoid reloading while still in use - font_cache[descriptor] = font - - # Hold onto refs of last three loaded fonts to prevent them being - # collected if momentarily dropped. 
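-    # Trim the hold list to the three most recent fonts, then push the newest
-    # to the front.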
- del font_hold[3:] - font_hold.insert(0, font) - - return font - - -def add_file(font): - """Add a font to pyglet's search path. - - In order to load a font that is not installed on the system, you must - call this method to tell pyglet that it exists. You can supply - either a filename or any file-like object. - - The font format is platform-dependent, but is typically a TrueType font - file containing a single font face. Note that to use a font added with this method, - you should pass the face name (not the file name) to :meth::py:func:`pyglet.font.load` or any - other place where you normally specify a font. - - :Parameters: - `font` : str or file-like object - Filename or file-like object to load fonts from. - - """ - if isinstance(font, str): - font = open(font, 'rb') - if hasattr(font, 'read'): - font = font.read() - _font_class.add_font_data(font) - - -def add_directory(directory): - """Add a directory of fonts to pyglet's search path. - - This function simply calls :meth:`pyglet.font.add_file` for each file with a ``.ttf`` - extension in the given directory. Subdirectories are not searched. - - :Parameters: - `dir` : str - Directory that contains font files. - - """ - for file in os.listdir(directory): - if file[-4:].lower() == '.ttf': - add_file(os.path.join(directory, file)) - - -__all__ = ('add_file', 'add_directory', 'load', 'have_font') diff --git a/spaces/aichina/Pix2Pix-Video/README.md b/spaces/aichina/Pix2Pix-Video/README.md deleted file mode 100644 index 3d8f7d06e470e918dedf27b7a230a565996a1252..0000000000000000000000000000000000000000 --- a/spaces/aichina/Pix2Pix-Video/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Pix2Pix Video -emoji: 🎨🎞️ -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -duplicated_from: fffiloni/Pix2Pix-Video ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/akhaliq/Detic/tools/create_imagenetlvis_json.py b/spaces/akhaliq/Detic/tools/create_imagenetlvis_json.py deleted file mode 100644 index 4d5a0b3712b5a2fb94737b8dfe5d70202305926b..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Detic/tools/create_imagenetlvis_json.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
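-# Build a COCO-style image-info JSON for ImageNet-LVIS: each ImageNet synset
-# folder is mapped to its LVIS category via WordNet, and per-image
-# width/height are recorded (no box annotations are produced).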
-import argparse -import json -import os -import cv2 -from nltk.corpus import wordnet - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--imagenet_path', default='datasets/imagenet/ImageNet-LVIS') - parser.add_argument('--lvis_meta_path', default='datasets/lvis/lvis_v1_val.json') - parser.add_argument('--out_path', default='datasets/imagenet/annotations/imagenet_lvis_image_info.json') - args = parser.parse_args() - - print('Loading LVIS meta') - data = json.load(open(args.lvis_meta_path, 'r')) - print('Done') - synset2cat = {x['synset']: x for x in data['categories']} - count = 0 - images = [] - image_counts = {} - folders = sorted(os.listdir(args.imagenet_path)) - for i, folder in enumerate(folders): - class_path = args.imagenet_path + folder - files = sorted(os.listdir(class_path)) - synset = wordnet.synset_from_pos_and_offset('n', int(folder[1:])).name() - cat = synset2cat[synset] - cat_id = cat['id'] - cat_name = cat['name'] - cat_images = [] - for file in files: - count = count + 1 - file_name = '{}/{}'.format(folder, file) - img = cv2.imread('{}/{}'.format(args.imagenet_path, file_name)) - h, w = img.shape[:2] - image = { - 'id': count, - 'file_name': file_name, - 'pos_category_ids': [cat_id], - 'width': w, - 'height': h - } - cat_images.append(image) - images.extend(cat_images) - image_counts[cat_id] = len(cat_images) - print(i, cat_name, len(cat_images)) - print('# Images', len(images)) - for x in data['categories']: - x['image_count'] = image_counts[x['id']] if x['id'] in image_counts else 0 - out = {'categories': data['categories'], 'images': images, 'annotations': []} - print('Writing to', args.out_path) - json.dump(out, open(args.out_path, 'w')) diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/jnas/voc1/run.sh b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/jnas/voc1/run.sh deleted file mode 100644 index fd614e1d07287b107012877565db015e57ebc96b..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/jnas/voc1/run.sh +++ /dev/null @@ -1,158 +0,0 @@ -#!/bin/bash - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -. ./cmd.sh || exit 1; -. ./path.sh || exit 1; - -# basic settings -stage=0 # stage to start -stop_stage=100 # stage to stop -verbose=1 # verbosity level (lower is less info) -n_gpus=1 # number of gpus in training -n_jobs=16 # number of parallel jobs in feature extraction - -# NOTE(kan-bayashi): renamed to conf to avoid conflict in parse_options.sh -conf=conf/parallel_wavegan.v1.yaml - -# directory path setting -db_root=/database/JNAS # database direcotry -dumpdir=dump # directory to dump features - -# training related setting -tag="" # tag for directory to save model -resume="" # checkpoint path to resume training - # (e.g. //checkpoint-10000steps.pkl) - -# decoding related setting -checkpoint="" # checkpoint path to be used for decoding - # if not provided, the latest one will be used - # (e.g. //checkpoint-400000steps.pkl) - -# shellcheck disable=SC1091 -. 
utils/parse_options.sh || exit 1; - -train_set="train_nodev" # name of training data directory -dev_set="dev" # name of development data direcotry -eval_set="eval" # name of evaluation data direcotry - -set -euo pipefail - -if [ "${stage}" -le 0 ] && [ "${stop_stage}" -ge 0 ]; then - echo "Stage 0: Data preparation" - local/data_prep.sh \ - --train_set "${train_set}" \ - --dev_set "${dev_set}" \ - --eval_set "${eval_set}" \ - "${db_root}" data conf/train_speakers.txt -fi - -stats_ext=$(grep -q "hdf5" <(yq ".format" "${conf}") && echo "h5" || echo "npy") -if [ "${stage}" -le 1 ] && [ "${stop_stage}" -ge 1 ]; then - echo "Stage 1: Feature extraction" - # extract raw features - pids=() - for name in "${train_set}" "${dev_set}" "${eval_set}"; do - ( - [ ! -e "${dumpdir}/${name}/raw" ] && mkdir -p "${dumpdir}/${name}/raw" - echo "Feature extraction start. See the progress via ${dumpdir}/${name}/raw/preprocessing.*.log." - utils/make_subset_data.sh "data/${name}" "${n_jobs}" "${dumpdir}/${name}/raw" - ${train_cmd} JOB=1:${n_jobs} "${dumpdir}/${name}/raw/preprocessing.JOB.log" \ - parallel-wavegan-preprocess \ - --config "${conf}" \ - --scp "${dumpdir}/${name}/raw/wav.JOB.scp" \ - --dumpdir "${dumpdir}/${name}/raw/dump.JOB" \ - --verbose "${verbose}" - echo "Successfully finished feature extraction of ${name} set." - ) & - pids+=($!) - done - i=0; for pid in "${pids[@]}"; do wait "${pid}" || ((++i)); done - [ "${i}" -gt 0 ] && echo "$0: ${i} background jobs are failed." && exit 1; - echo "Successfully finished feature extraction." - - # calculate statistics for normalization - echo "Statistics computation start. See the progress via ${dumpdir}/${train_set}/compute_statistics.log." - ${train_cmd} "${dumpdir}/${train_set}/compute_statistics.log" \ - parallel-wavegan-compute-statistics \ - --config "${conf}" \ - --rootdir "${dumpdir}/${train_set}/raw" \ - --dumpdir "${dumpdir}/${train_set}" \ - --verbose "${verbose}" - echo "Successfully finished calculation of statistics." - - # normalize and dump them - pids=() - for name in "${train_set}" "${dev_set}" "${eval_set}"; do - ( - [ ! -e "${dumpdir}/${name}/norm" ] && mkdir -p "${dumpdir}/${name}/norm" - echo "Nomalization start. See the progress via ${dumpdir}/${name}/norm/normalize.*.log." - ${train_cmd} JOB=1:${n_jobs} "${dumpdir}/${name}/norm/normalize.JOB.log" \ - parallel-wavegan-normalize \ - --config "${conf}" \ - --stats "${dumpdir}/${train_set}/stats.${stats_ext}" \ - --rootdir "${dumpdir}/${name}/raw/dump.JOB" \ - --dumpdir "${dumpdir}/${name}/norm/dump.JOB" \ - --verbose "${verbose}" - echo "Successfully finished normalization of ${name} set." - ) & - pids+=($!) - done - i=0; for pid in "${pids[@]}"; do wait "${pid}" || ((++i)); done - [ "${i}" -gt 0 ] && echo "$0: ${i} background jobs are failed." && exit 1; - echo "Successfully finished normalization." -fi - -if [ -z "${tag}" ]; then - expdir="exp/${train_set}_jnas_$(basename "${conf}" .yaml)" -else - expdir="exp/${train_set}_jnas_${tag}" -fi -if [ "${stage}" -le 2 ] && [ "${stop_stage}" -ge 2 ]; then - echo "Stage 2: Network training" - [ ! -e "${expdir}" ] && mkdir -p "${expdir}" - cp "${dumpdir}/${train_set}/stats.${stats_ext}" "${expdir}" - if [ "${n_gpus}" -gt 1 ]; then - train="python -m parallel_wavegan.distributed.launch --nproc_per_node ${n_gpus} -c parallel-wavegan-train" - else - train="parallel-wavegan-train" - fi - echo "Training start. See the progress via ${expdir}/train.log." 
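-    # ${train} expands to a distributed launch when n_gpus > 1, otherwise to
-    # the plain parallel-wavegan-train entry point (selected just above).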
- ${cuda_cmd} --gpu "${n_gpus}" "${expdir}/train.log" \ - ${train} \ - --config "${conf}" \ - --train-dumpdir "${dumpdir}/${train_set}/norm" \ - --dev-dumpdir "${dumpdir}/${dev_set}/norm" \ - --outdir "${expdir}" \ - --resume "${resume}" \ - --verbose "${verbose}" - echo "Successfully finished training." -fi - -if [ "${stage}" -le 3 ] && [ "${stop_stage}" -ge 3 ]; then - echo "Stage 3: Network decoding" - # shellcheck disable=SC2012 - [ -z "${checkpoint}" ] && checkpoint="$(ls -dt "${expdir}"/*.pkl | head -1 || true)" - outdir="${expdir}/wav/$(basename "${checkpoint}" .pkl)" - pids=() - for name in "${dev_set}" "${eval_set}"; do - ( - [ ! -e "${outdir}/${name}" ] && mkdir -p "${outdir}/${name}" - [ "${n_gpus}" -gt 1 ] && n_gpus=1 - echo "Decoding start. See the progress via ${outdir}/${name}/decode.log." - ${cuda_cmd} --gpu "${n_gpus}" "${outdir}/${name}/decode.log" \ - parallel-wavegan-decode \ - --dumpdir "${dumpdir}/${name}/norm" \ - --checkpoint "${checkpoint}" \ - --outdir "${outdir}/${name}" \ - --verbose "${verbose}" - echo "Successfully finished decoding of ${name} set." - ) & - pids+=($!) - done - i=0; for pid in "${pids[@]}"; do wait "${pid}" || ((++i)); done - [ "${i}" -gt 0 ] && echo "$0: ${i} background jobs are failed." && exit 1; - echo "Successfully finished decoding." -fi -echo "Finished." diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/jsss/voc1/local/data_prep.sh b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/jsss/voc1/local/data_prep.sh deleted file mode 100644 index 62e42b6e1468c9b502cdf33131ba6bc0982c458a..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/jsss/voc1/local/data_prep.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -# Prepare kaldi-style data directory for JSSS corpus - -fs=24000 -num_dev=50 -num_eval=50 -train_set="train_nodev" -dev_set="dev" -eval_set="eval" -shuffle=false - -# shellcheck disable=SC1091 -. utils/parse_options.sh || exit 1; - -db=$1 -data_dir_root=$2 - -# check arguments -if [ $# != 2 ]; then - echo "Usage: $0 [Options] " - echo "e.g.: $0 downloads/jsss_ver1 data" - echo "" - echo "Options:" - echo " --fs: target sampling rate (default=24000)." - echo " --num_dev: number of development uttreances (default=50)." - echo " --num_eval: number of evaluation uttreances (default=50)." - echo " --train_set: name of train set (default=train_nodev)." - echo " --dev_set: name of dev set (default=dev)." - echo " --eval_set: name of eval set (default=eval)." - echo " --shuffle: whether to perform shuffle in making dev / eval set (default=false)." - exit 1 -fi - -set -euo pipefail - -###################################### -# process data without segments # -###################################### -dsets_without_segments=" -short-form/basic5000 -short-form/onomatopee300 -short-form/voiceactress100 -simplification -" -for dset in ${dsets_without_segments}; do - # check directory existence - _data_dir=${data_dir_root}/$(basename "${dset}") - [ ! -e "${_data_dir}" ] && mkdir -p "${_data_dir}" - - # set filenames - scp=${_data_dir}/wav.scp - segments=${_data_dir}/segments - - # check file existence - [ -e "${scp}" ] && rm "${scp}" - [ -e "${segments}" ] && rm "${segments}" - - # make wav.scp and segments - find "${db}/${dset}/wav24kHz16bit" -name "*.wav" | sort | while read -r filename; do - utt_id=$(basename "${filename}" | sed -e "s/\.[^\.]*$//g") - lab_filename="${db}/${dset}/lab/$(basename "${filename}" .wav).lab" - if [ ! 
-e "${lab_filename}" ]; then - echo "${lab_filename} does not exist. Skipped." - continue - fi - start_sec=$(head -n 1 "${lab_filename}" | cut -d " " -f 2) - end_sec=$(tail -n 1 "${lab_filename}" | cut -d " " -f 1) - echo "${utt_id} ${utt_id} ${start_sec} ${end_sec}" >> "${segments}" - if [ "${fs}" -eq 24000 ]; then - # default sampling rate - echo "${utt_id} ${filename}" >> "${scp}" - else - echo "${utt_id} sox ${filename} -t wav -r $fs - |" >> "${scp}" - fi - done - echo "Successfully prepared ${dset}." -done - -###################################### -# process data with segments # -###################################### -dsets_with_segments=" -long-form/katsura-masakazu -long-form/udon -long-form/washington-dc -summarization -" -for dset in ${dsets_with_segments}; do - # check directory existence - _data_dir=${data_dir_root}/$(basename "${dset}") - [ ! -e "${_data_dir}" ] && mkdir -p "${_data_dir}" - - # set filenames - scp=${_data_dir}/wav.scp - segments=${_data_dir}/segments - - # check file existence - [ -e "${scp}" ] && rm "${scp}" - [ -e "${segments}" ] && rm "${segments}" - - # make wav.scp - find "${db}/${dset}/wav24kHz16bit" -name "*.wav" | sort | while read -r filename; do - wav_id=$(basename "${filename}" | sed -e "s/\.[^\.]*$//g") - if [ "${fs}" -eq 24000 ]; then - # default sampling rate - echo "${wav_id} ${filename}" >> "${scp}" - else - echo "${wav_id} sox ${filename} -t wav -r $fs - |" >> "${scp}" - fi - done - - # make segments - find "${db}/${dset}/transcript_utf8" -name "*.txt" | sort | while read -r filename; do - wav_id=$(basename "${filename}" .txt) - while read -r line; do - start_sec=$(echo "${line}" | cut -f 1) - end_sec=$(echo "${line}" | cut -f 2) - utt_id=${wav_id} - utt_id+="_$(printf %010d "$(echo "${start_sec}" | tr -d "." | sed -e "s/^[0]*//g")")" - utt_id+="_$(printf %010d "$(echo "${end_sec}" | tr -d "." | sed -e "s/^[0]*//g")")" - - # modify segment information with force alignment results - lab_filename=${db}/${dset}/lab/${utt_id}.lab - if [ ! -e "${lab_filename}" ]; then - echo "${lab_filename} does not exist. Skipped." - continue - fi - start_sec_offset=$(head -n 1 "${lab_filename}" | cut -d " " -f 2) - end_sec_offset=$(tail -n 1 "${lab_filename}" | cut -d " " -f 1) - start_sec=$(python -c "print(${start_sec} + ${start_sec_offset})") - end_sec=$(python -c "print(${start_sec} + ${end_sec_offset} - ${start_sec_offset})") - echo "${utt_id} ${wav_id} ${start_sec} ${end_sec}" >> "${segments}" - done < "${filename}" - done - - # fix - echo "Successfully prepared ${dset}." 
-done - -###################################### -# combine and split data # -###################################### -# combine all data -combined_data_dirs="" -for dset in ${dsets_without_segments} ${dsets_with_segments}; do - combined_data_dirs+="${data_dir_root}/$(basename "${dset}") " -done -# shellcheck disable=SC2086 -utils/combine_data.sh "${data_dir_root}/all" ${combined_data_dirs} -# shellcheck disable=SC2086 -rm -rf ${combined_data_dirs} - -# split -num_all=$(wc -l < "${data_dir_root}/all/segments") -num_deveval=$((num_dev + num_eval)) -num_train=$((num_all - num_deveval)) -utils/split_data.sh \ - --num_first "${num_deveval}" \ - --num_second "${num_train}" \ - --shuffle "${shuffle}" \ - "${data_dir_root}/all" \ - "${data_dir_root}/deveval" \ - "${data_dir_root}/${train_set}" -utils/split_data.sh \ - --num_first "${num_eval}" \ - --num_second "${num_dev}" \ - --shuffle "${shuffle}" \ - "${data_dir_root}/deveval" \ - "${data_dir_root}/${eval_set}" \ - "${data_dir_root}/${dev_set}" - -# remove tmp directories -rm -rf "${data_dir_root}/all" -rm -rf "${data_dir_root}/deveval" - -echo "Successfully prepared data." diff --git a/spaces/alan-chen-intel/dagan-demo/README.md b/spaces/alan-chen-intel/dagan-demo/README.md deleted file mode 100644 index 28dada65086819b5781a69afe2b550219ba3d9ad..0000000000000000000000000000000000000000 --- a/spaces/alan-chen-intel/dagan-demo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Dagan Demo -emoji: 👁 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.0.20 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/algomuffin/jojo_fork/e4e/models/encoders/psp_encoders.py b/spaces/algomuffin/jojo_fork/e4e/models/encoders/psp_encoders.py deleted file mode 100644 index dc49acd11f062cbd29f839ee3c04bce7fa84f479..0000000000000000000000000000000000000000 --- a/spaces/algomuffin/jojo_fork/e4e/models/encoders/psp_encoders.py +++ /dev/null @@ -1,200 +0,0 @@ -from enum import Enum -import math -import numpy as np -import torch -from torch import nn -from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module - -from e4e.models.encoders.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE, _upsample_add -from e4e.models.stylegan2.model import EqualLinear - - -class ProgressiveStage(Enum): - WTraining = 0 - Delta1Training = 1 - Delta2Training = 2 - Delta3Training = 3 - Delta4Training = 4 - Delta5Training = 5 - Delta6Training = 6 - Delta7Training = 7 - Delta8Training = 8 - Delta9Training = 9 - Delta10Training = 10 - Delta11Training = 11 - Delta12Training = 12 - Delta13Training = 13 - Delta14Training = 14 - Delta15Training = 15 - Delta16Training = 16 - Delta17Training = 17 - Inference = 18 - - -class GradualStyleBlock(Module): - def __init__(self, in_c, out_c, spatial): - super(GradualStyleBlock, self).__init__() - self.out_c = out_c - self.spatial = spatial - num_pools = int(np.log2(spatial)) - modules = [] - modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1), - nn.LeakyReLU()] - for i in range(num_pools - 1): - modules += [ - Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1), - nn.LeakyReLU() - ] - self.convs = nn.Sequential(*modules) - self.linear = EqualLinear(out_c, out_c, lr_mul=1) - - def forward(self, x): - x = self.convs(x) - x = x.view(-1, self.out_c) - x = self.linear(x) - return x - - -class GradualStyleEncoder(Module): - def __init__(self, num_layers, mode='ir', opts=None): - super(GradualStyleEncoder, 
self).__init__() - assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152' - assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - self.styles = nn.ModuleList() - log_size = int(math.log(opts.stylegan_size, 2)) - self.style_count = 2 * log_size - 2 - self.coarse_ind = 3 - self.middle_ind = 7 - for i in range(self.style_count): - if i < self.coarse_ind: - style = GradualStyleBlock(512, 512, 16) - elif i < self.middle_ind: - style = GradualStyleBlock(512, 512, 32) - else: - style = GradualStyleBlock(512, 512, 64) - self.styles.append(style) - self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0) - self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0) - - def forward(self, x): - x = self.input_layer(x) - - latents = [] - modulelist = list(self.body._modules.values()) - for i, l in enumerate(modulelist): - x = l(x) - if i == 6: - c1 = x - elif i == 20: - c2 = x - elif i == 23: - c3 = x - - for j in range(self.coarse_ind): - latents.append(self.styles[j](c3)) - - p2 = _upsample_add(c3, self.latlayer1(c2)) - for j in range(self.coarse_ind, self.middle_ind): - latents.append(self.styles[j](p2)) - - p1 = _upsample_add(p2, self.latlayer2(c1)) - for j in range(self.middle_ind, self.style_count): - latents.append(self.styles[j](p1)) - - out = torch.stack(latents, dim=1) - return out - - -class Encoder4Editing(Module): - def __init__(self, num_layers, mode='ir', opts=None): - super(Encoder4Editing, self).__init__() - assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152' - assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - self.styles = nn.ModuleList() - log_size = int(math.log(opts.stylegan_size, 2)) - self.style_count = 2 * log_size - 2 - self.coarse_ind = 3 - self.middle_ind = 7 - - for i in range(self.style_count): - if i < self.coarse_ind: - style = GradualStyleBlock(512, 512, 16) - elif i < self.middle_ind: - style = GradualStyleBlock(512, 512, 32) - else: - style = GradualStyleBlock(512, 512, 64) - self.styles.append(style) - - self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0) - self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0) - - self.progressive_stage = ProgressiveStage.Inference - - def get_deltas_starting_dimensions(self): - ''' Get a list of the initial dimension of every delta from which it is applied ''' - return list(range(self.style_count)) # Each dimension has a delta applied to it - - def set_progressive_stage(self, new_stage: ProgressiveStage): - self.progressive_stage = new_stage - print('Changed progressive stage to: ', new_stage) - - def forward(self, x): - x = 
self.input_layer(x) - - modulelist = list(self.body._modules.values()) - for i, l in enumerate(modulelist): - x = l(x) - if i == 6: - c1 = x - elif i == 20: - c2 = x - elif i == 23: - c3 = x - - # Infer main W and duplicate it - w0 = self.styles[0](c3) - w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2) - stage = self.progressive_stage.value - features = c3 - for i in range(1, min(stage + 1, self.style_count)): # Infer additional deltas - if i == self.coarse_ind: - p2 = _upsample_add(c3, self.latlayer1(c2)) # FPN's middle features - features = p2 - elif i == self.middle_ind: - p1 = _upsample_add(p2, self.latlayer2(c1)) # FPN's fine features - features = p1 - delta_i = self.styles[i](features) - w[:, i] += delta_i - return w diff --git a/spaces/aliabid94/AutoGPT/autogpt/memory/base.py b/spaces/aliabid94/AutoGPT/autogpt/memory/base.py deleted file mode 100644 index 691e2299c4caa5c2e9af5b2436727834f3cc6c67..0000000000000000000000000000000000000000 --- a/spaces/aliabid94/AutoGPT/autogpt/memory/base.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Base class for memory providers.""" -import abc - -import openai - -from autogpt.config import AbstractSingleton, Config - -cfg = Config() - - -def get_ada_embedding(text): - text = text.replace("\n", " ") - if cfg.use_azure: - return openai.Embedding.create( - input=[text], - engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"), - )["data"][0]["embedding"] - else: - return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[ - "data" - ][0]["embedding"] - - -class MemoryProviderSingleton(AbstractSingleton): - @abc.abstractmethod - def add(self, data): - pass - - @abc.abstractmethod - def get(self, data): - pass - - @abc.abstractmethod - def clear(self): - pass - - @abc.abstractmethod - def get_relevant(self, data, num_relevant=5): - pass - - @abc.abstractmethod - def get_stats(self): - pass diff --git a/spaces/aminghias/text_analytics_project/README.md b/spaces/aminghias/text_analytics_project/README.md deleted file mode 100644 index a56a019721429551802a52d60838489901e121d2..0000000000000000000000000000000000000000 --- a/spaces/aminghias/text_analytics_project/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text Analytics Project -emoji: 💻 -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/anhnv125/FRN/models/frn.py b/spaces/anhnv125/FRN/models/frn.py deleted file mode 100644 index ca6a8dba47dd98e8976ed5b3ce17994a66e6580c..0000000000000000000000000000000000000000 --- a/spaces/anhnv125/FRN/models/frn.py +++ /dev/null @@ -1,220 +0,0 @@ -import os - -import librosa -import pytorch_lightning as pl -import soundfile as sf -import torch -from torch import nn -from torch.utils.data import DataLoader -from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality as PESQ -from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility as STOI - -from PLCMOS.plc_mos import PLCMOSEstimator -from config import CONFIG -from loss import Loss -from models.blocks import Encoder, Predictor -from utils.utils import visualize, LSD - -plcmos = PLCMOSEstimator() - - -class PLCModel(pl.LightningModule): - def __init__(self, train_dataset=None, val_dataset=None, window_size=960, enc_layers=4, enc_in_dim=384, enc_dim=768, - pred_dim=512, pred_layers=1, pred_ckpt_path='lightning_logs/predictor/checkpoints/predictor.ckpt'): - 
super(PLCModel, self).__init__() - self.window_size = window_size - self.hop_size = window_size // 2 - self.learning_rate = CONFIG.TRAIN.lr - self.hparams.batch_size = CONFIG.TRAIN.batch_size - - self.enc_layers = enc_layers - self.enc_in_dim = enc_in_dim - self.enc_dim = enc_dim - self.pred_dim = pred_dim - self.pred_layers = pred_layers - self.train_dataset = train_dataset - self.val_dataset = val_dataset - self.stoi = STOI(48000) - self.pesq = PESQ(16000, 'wb') - - if pred_ckpt_path is not None: - self.predictor = Predictor.load_from_checkpoint(pred_ckpt_path) - else: - self.predictor = Predictor(window_size=self.window_size, lstm_dim=self.pred_dim, - lstm_layers=self.pred_layers) - self.joiner = nn.Sequential( - nn.Conv2d(3, 48, kernel_size=(9, 1), stride=1, padding=(4, 0), padding_mode='reflect', - groups=3), - nn.LeakyReLU(0.2), - nn.Conv2d(48, 2, kernel_size=1, stride=1, padding=0, groups=2), - ) - - self.encoder = Encoder(in_dim=self.window_size, dim=self.enc_in_dim, depth=self.enc_layers, - mlp_dim=self.enc_dim) - - self.loss = Loss() - self.window = torch.sqrt(torch.hann_window(self.window_size)) - self.save_hyperparameters('window_size', 'enc_layers', 'enc_in_dim', 'enc_dim', 'pred_dim', 'pred_layers') - - def forward(self, x): - """ - Input: real-imaginary; shape (B, F, T, 2); F = hop_size + 1 - Output: real-imaginary - """ - - B, C, F, T = x.shape - - x = x.permute(3, 0, 1, 2).unsqueeze(-1) - prev_mag = torch.zeros((B, 1, F, 1), device=x.device) - predictor_state = torch.zeros((2, self.predictor.lstm_layers, B, self.predictor.lstm_dim), device=x.device) - mlp_state = torch.zeros((self.encoder.depth, 2, 1, B, self.encoder.dim), device=x.device) - result = [] - for step in x: - feat, mlp_state = self.encoder(step, mlp_state) - prev_mag, predictor_state = self.predictor(prev_mag, predictor_state) - feat = torch.cat((feat, prev_mag), 1) - feat = self.joiner(feat) - feat = feat + step - result.append(feat) - prev_mag = torch.linalg.norm(feat, dim=1, ord=1, keepdims=True) # compute magnitude - output = torch.cat(result, -1) - return output - - def forward_onnx(self, x, prev_mag, predictor_state=None, mlp_state=None): - prev_mag, predictor_state = self.predictor(prev_mag, predictor_state) - feat, mlp_state = self.encoder(x, mlp_state) - - feat = torch.cat((feat, prev_mag), 1) - feat = self.joiner(feat) - prev_mag = torch.linalg.norm(feat, dim=1, ord=1, keepdims=True) - feat = feat + x - return feat, prev_mag, predictor_state, mlp_state - - def train_dataloader(self): - return DataLoader(self.train_dataset, shuffle=False, batch_size=self.hparams.batch_size, - num_workers=CONFIG.TRAIN.workers, persistent_workers=True) - - def val_dataloader(self): - return DataLoader(self.val_dataset, shuffle=False, batch_size=self.hparams.batch_size, - num_workers=CONFIG.TRAIN.workers, persistent_workers=True) - - def training_step(self, batch, batch_idx): - x_in, y = batch - f_0 = x_in[:, :, 0:1, :] - x = x_in[:, :, 1:, :] - - x = self(x) - x = torch.cat([f_0, x], dim=2) - - loss = self.loss(x, y) - self.log('train_loss', loss, logger=True) - return loss - - def validation_step(self, val_batch, batch_idx): - x, y = val_batch - f_0 = x[:, :, 0:1, :] - x_in = x[:, :, 1:, :] - - pred = self(x_in) - pred = torch.cat([f_0, pred], dim=2) - - loss = self.loss(pred, y) - self.window = self.window.to(pred.device) - pred = torch.view_as_complex(pred.permute(0, 2, 3, 1).contiguous()) - pred = torch.istft(pred, self.window_size, self.hop_size, window=self.window) - y = torch.view_as_complex(y.permute(0, 2, 3, 
1).contiguous()) - y = torch.istft(y, self.window_size, self.hop_size, window=self.window) - - self.log('val_loss', loss, on_step=False, on_epoch=True, logger=True, prog_bar=True, sync_dist=True) - - if batch_idx == 0: - i = torch.randint(0, x.shape[0], (1,)).item() - x = torch.view_as_complex(x.permute(0, 2, 3, 1).contiguous()) - x = torch.istft(x[i], self.window_size, self.hop_size, window=self.window) - - self.trainer.logger.log_spectrogram(y[i], x, pred[i], self.current_epoch) - self.trainer.logger.log_audio(y[i], x, pred[i], self.current_epoch) - - def test_step(self, test_batch, batch_idx): - inp, tar, inp_wav, tar_wav = test_batch - inp_wav = inp_wav.squeeze() - tar_wav = tar_wav.squeeze() - f_0 = inp[:, :, 0:1, :] - x = inp[:, :, 1:, :] - pred = self(x) - pred = torch.cat([f_0, pred], dim=2) - pred = torch.istft(pred.squeeze(0).permute(1, 2, 0), self.window_size, self.hop_size, - window=self.window.to(pred.device)) - stoi = self.stoi(pred, tar_wav) - - tar_wav = tar_wav.cpu().numpy() - inp_wav = inp_wav.cpu().numpy() - pred = pred.detach().cpu().numpy() - lsd, _ = LSD(tar_wav, pred) - - if batch_idx in [5, 7, 9]: - sample_path = os.path.join(CONFIG.LOG.sample_path) - path = os.path.join(sample_path, 'sample_' + str(batch_idx)) - visualize(tar_wav, inp_wav, pred, path) - sf.write(os.path.join(path, 'enhanced_output.wav'), pred, samplerate=CONFIG.DATA.sr, subtype='PCM_16') - sf.write(os.path.join(path, 'lossy_input.wav'), inp_wav, samplerate=CONFIG.DATA.sr, subtype='PCM_16') - sf.write(os.path.join(path, 'target.wav'), tar_wav, samplerate=CONFIG.DATA.sr, subtype='PCM_16') - if CONFIG.DATA.sr != 16000: - pred = librosa.resample(pred, orig_sr=48000, target_sr=16000) - tar_wav = librosa.resample(tar_wav, orig_sr=48000, target_sr=16000, res_type='kaiser_fast') - ret = plcmos.run(pred, tar_wav) - pesq = self.pesq(torch.tensor(pred), torch.tensor(tar_wav)) - metrics = { - "Intrusive": ret[0], - "Non-intrusive": ret[1], - 'LSD': lsd, - 'STOI': stoi, - 'PESQ': pesq, - } - self.log_dict(metrics) - return metrics - - def predict_step(self, batch, batch_idx: int, dataloader_idx: int = 0): - f_0 = batch[:, :, 0:1, :] - x = batch[:, :, 1:, :] - pred = self(x) - pred = torch.cat([f_0, pred], dim=2) - pred = torch.istft(pred.squeeze(0).permute(1, 2, 0), self.window_size, self.hop_size, - window=self.window.to(pred.device)) - return pred - - def configure_optimizers(self): - optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) - lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=CONFIG.TRAIN.patience, - factor=CONFIG.TRAIN.factor, verbose=True) - - scheduler = { - 'scheduler': lr_scheduler, - 'reduce_on_plateau': True, - 'monitor': 'val_loss' - } - return [optimizer], [scheduler] - - -class OnnxWrapper(pl.LightningModule): - def __init__(self, model, *args, **kwargs): - super().__init__(*args, **kwargs) - self.model = model - batch_size = 1 - pred_states = torch.zeros((2, 1, batch_size, model.predictor.lstm_dim)) - mlp_states = torch.zeros((model.encoder.depth, 2, 1, batch_size, model.encoder.dim)) - mag = torch.zeros((batch_size, 1, model.hop_size, 1)) - x = torch.randn(batch_size, model.hop_size + 1, 2) - self.sample = (x, mag, pred_states, mlp_states) - self.input_names = ['input', 'mag_in_cached_', 'pred_state_in_cached_', 'mlp_state_in_cached_'] - self.output_names = ['output', 'mag_out_cached_', 'pred_state_out_cached_', 'mlp_state_out_cached_'] - - def forward(self, x, prev_mag, predictor_state=None, mlp_state=None): - x = x.permute(0, 2, 
1).unsqueeze(-1) - f_0 = x[:, :, 0:1, :] - x = x[:, :, 1:, :] - - output, prev_mag, predictor_state, mlp_state = self.model.forward_onnx(x, prev_mag, predictor_state, mlp_state) - output = torch.cat([f_0, output], dim=2) - output = output.squeeze(-1).permute(0, 2, 1) - return output, prev_mag, predictor_state, mlp_state diff --git a/spaces/annchen2010/ChatGPT/utils.py b/spaces/annchen2010/ChatGPT/utils.py deleted file mode 100644 index f6e4fa4e8a9f908baa4509d7206ff3455ac57f39..0000000000000000000000000000000000000000 --- a/spaces/annchen2010/ChatGPT/utils.py +++ /dev/null @@ -1,386 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type -import logging -import json -import os -import datetime -import hashlib -import csv -import requests -import re - -import gradio as gr -from pypinyin import lazy_pinyin -import tiktoken -import mdtex2html -from markdown import markdown -from pygments import highlight -from pygments.lexers import get_lexer_by_name -from pygments.formatters import HtmlFormatter - -from presets import * - -# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s") - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -def count_token(message): - encoding = tiktoken.get_encoding("cl100k_base") - input_str = f"role: {message['role']}, content: {message['content']}" - length = len(encoding.encode(input_str)) - return length - - -def markdown_to_html_with_syntax_highlight(md_str): - def replacer(match): - lang = match.group(1) or "text" - code = match.group(2) - - try: - lexer = get_lexer_by_name(lang, stripall=True) - except ValueError: - lexer = get_lexer_by_name("text", stripall=True) - - formatter = HtmlFormatter() - highlighted_code = highlight(code, lexer, formatter) - - return f'
<pre><code class="{lang}">{highlighted_code}</code></pre>
' - - code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" - md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) - - html_str = markdown(md_str) - return html_str - - -def normalize_markdown(md_text: str) -> str: - lines = md_text.split("\n") - normalized_lines = [] - inside_list = False - - for i, line in enumerate(lines): - if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): - if not inside_list and i > 0 and lines[i - 1].strip() != "": - normalized_lines.append("") - inside_list = True - normalized_lines.append(line) - elif inside_list and line.strip() == "": - if i < len(lines) - 1 and not re.match( - r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() - ): - normalized_lines.append(line) - continue - else: - inside_list = False - normalized_lines.append(line) - - return "\n".join(normalized_lines) - - -def convert_mdtext(md_text): - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) - code_blocks = code_block_pattern.findall(md_text) - non_code_parts = code_block_pattern.split(md_text)[::2] - - result = [] - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - non_code = normalize_markdown(non_code) - if inline_code_pattern.search(non_code): - result.append(markdown(non_code, extensions=["tables"])) - else: - result.append(mdtex2html.convert(non_code, extensions=["tables"])) - if code.strip(): - # _, code = detect_language(code) # 暂时去除代码高亮功能,因为在大段代码的情况下会出现问题 - # code = code.replace("\n\n", "\n") # 暂时去除代码中的空行,因为在大段代码的情况下会出现问题 - code = f"```{code}\n\n```" - code = markdown_to_html_with_syntax_highlight(code) - result.append(code) - result = "".join(result) - return result - - -def detect_language(code): - if code.startswith("\n"): - first_line = "" - else: - first_line = code.strip().split("\n", 1)[0] - language = first_line.lower() if first_line else "" - code_without_language = code[len(first_line) :].lstrip() if first_line else code - return language, code_without_language - - -def construct_text(role, text): - return {"role": role, "content": text} - - -def construct_user(text): - return construct_text("user", text) - - -def construct_system(text): - return construct_text("system", text) - - -def construct_assistant(text): - return construct_text("assistant", text) - - -def construct_token_message(token, stream=False): - return f"Token 计数: {token}" - - -def delete_last_conversation(chatbot, history, previous_token_count): - if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]: - logging.info("由于包含报错信息,只删除chatbot记录") - chatbot.pop() - return chatbot, history - if len(history) > 0: - logging.info("删除了一组对话历史") - history.pop() - history.pop() - if len(chatbot) > 0: - logging.info("删除了一组chatbot对话") - chatbot.pop() - if len(previous_token_count) > 0: - logging.info("删除了一组对话的token计数记录") - previous_token_count.pop() - return ( - chatbot, - history, - previous_token_count, - construct_token_message(sum(previous_token_count)), - ) - - -def save_file(filename, system, history, chatbot): - logging.info("保存对话历史中……") - os.makedirs(HISTORY_DIR, exist_ok=True) - if filename.endswith(".json"): - json_s = {"system": system, "history": history, "chatbot": chatbot} - print(json_s) - with open(os.path.join(HISTORY_DIR, filename), "w") as f: - json.dump(json_s, f) - elif filename.endswith(".md"): - md_s = f"system: \n- {system} \n" - for data in history: - md_s += f"\n{data['role']}: \n- {data['content']} \n" - with open(os.path.join(HISTORY_DIR, filename), "w", encoding="utf8") as 
f: - f.write(md_s) - logging.info("保存对话历史完毕") - return os.path.join(HISTORY_DIR, filename) - - -def save_chat_history(filename, system, history, chatbot): - if filename == "": - return - if not filename.endswith(".json"): - filename += ".json" - return save_file(filename, system, history, chatbot) - - -def export_markdown(filename, system, history, chatbot): - if filename == "": - return - if not filename.endswith(".md"): - filename += ".md" - return save_file(filename, system, history, chatbot) - - -def load_chat_history(filename, system, history, chatbot): - logging.info("加载对话历史中……") - if type(filename) != str: - filename = filename.name - try: - with open(os.path.join(HISTORY_DIR, filename), "r") as f: - json_s = json.load(f) - try: - if type(json_s["history"][0]) == str: - logging.info("历史记录格式为旧版,正在转换……") - new_history = [] - for index, item in enumerate(json_s["history"]): - if index % 2 == 0: - new_history.append(construct_user(item)) - else: - new_history.append(construct_assistant(item)) - json_s["history"] = new_history - logging.info(new_history) - except: - # 没有对话历史 - pass - logging.info("加载对话历史完毕") - return filename, json_s["system"], json_s["history"], json_s["chatbot"] - except FileNotFoundError: - logging.info("没有找到对话历史文件,不执行任何操作") - return filename, system, history, chatbot - - -def sorted_by_pinyin(list): - return sorted(list, key=lambda char: lazy_pinyin(char)[0][0]) - - -def get_file_names(dir, plain=False, filetypes=[".json"]): - logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}") - files = [] - try: - for type in filetypes: - files += [f for f in os.listdir(dir) if f.endswith(type)] - except FileNotFoundError: - files = [] - files = sorted_by_pinyin(files) - if files == []: - files = [""] - if plain: - return files - else: - return gr.Dropdown.update(choices=files) - - -def get_history_names(plain=False): - logging.info("获取历史记录文件名列表") - return get_file_names(HISTORY_DIR, plain) - - -def load_template(filename, mode=0): - logging.info(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)") - lines = [] - logging.info("Loading template...") - if filename.endswith(".json"): - with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f: - lines = json.load(f) - lines = [[i["act"], i["prompt"]] for i in lines] - else: - with open( - os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8" - ) as csvfile: - reader = csv.reader(csvfile) - lines = list(reader) - lines = lines[1:] - if mode == 1: - return sorted_by_pinyin([row[0] for row in lines]) - elif mode == 2: - return {row[0]: row[1] for row in lines} - else: - choices = sorted_by_pinyin([row[0] for row in lines]) - return {row[0]: row[1] for row in lines}, gr.Dropdown.update( - choices=choices, value=choices[0] - ) - - -def get_template_names(plain=False): - logging.info("获取模板文件名列表") - return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"]) - - -def get_template_content(templates, selection, original_system_prompt): - logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}") - try: - return templates[selection] - except: - return original_system_prompt - - -def reset_state(): - logging.info("重置状态") - return [], [], [], construct_token_message(0) - - -def reset_textbox(): - return gr.update(value="") - - -def reset_default(): - global API_URL - API_URL = "https://api.openai.com/v1/chat/completions" - os.environ.pop("HTTPS_PROXY", None) - os.environ.pop("https_proxy", None) - return gr.update(value=API_URL), gr.update(value=""), "API URL 和代理已重置" - - -def 
change_api_url(url): - global API_URL - API_URL = url - msg = f"API地址更改为了{url}" - logging.info(msg) - return msg - - -def change_proxy(proxy): - os.environ["HTTPS_PROXY"] = proxy - msg = f"代理更改为了{proxy}" - logging.info(msg) - return msg - - -def hide_middle_chars(s): - if len(s) <= 8: - return s - else: - head = s[:4] - tail = s[-4:] - hidden = "*" * (len(s) - 8) - return head + hidden + tail - - -def submit_key(key): - key = key.strip() - msg = f"API密钥更改为了{hide_middle_chars(key)}" - logging.info(msg) - return key, msg - - -def sha1sum(filename): - sha1 = hashlib.sha1() - sha1.update(filename.encode("utf-8")) - return sha1.hexdigest() - - -def replace_today(prompt): - today = datetime.datetime.today().strftime("%Y-%m-%d") - return prompt.replace("{current_date}", today) - - -def get_geoip(): - response = requests.get("https://ipapi.co/json/", timeout=5) - try: - data = response.json() - except: - data = {"error": True, "reason": "连接ipapi失败"} - if "error" in data.keys(): - logging.warning(f"无法获取IP地址信息。\n{data}") - if data["reason"] == "RateLimited": - return ( - f"获取IP地理位置失败,因为达到了检测IP的速率限制。聊天功能可能仍然可用,但请注意,如果您的IP地址在不受支持的地区,您可能会遇到问题。" - ) - else: - return f"获取IP地理位置失败。原因:{data['reason']}。你仍然可以使用聊天功能。" - else: - country = data["country_name"] - if country == "China": - text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**" - else: - text = f"您的IP区域:{country}。" - logging.info(text) - return text - - -def find_n(lst, max_num): - n = len(lst) - total = sum(lst) - - if total < max_num: - return n - - for i in range(len(lst)): - if total - lst[i] < max_num: - return n - i -1 - total = total - lst[i] - return 1 diff --git a/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py b/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py deleted file mode 100644 index 052df6220595a1b39b7e2aea37ca4872d113dfd2..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Conditional DETR model and criterion classes. -# Copyright (c) 2021 Microsoft. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# ------------------------------------------------------------------------ -# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) -# Copyright (c) 2020 SenseTime. All Rights Reserved. 
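# --- Editor's sketch, not part of either file in this diff: the find_n
# helper that closes utils.py above keeps as many of the most recent chat
# turns as fit under a token budget. Re-stated here so the example runs
# on its own:
def find_n(lst, max_num):
    n = len(lst)
    total = sum(lst)
    if total < max_num:
        return n
    for i in range(len(lst)):
        if total - lst[i] < max_num:
            return n - i - 1
        total = total - lst[i]
    return 1

# Per-turn token counts, oldest first, with a 2000-token budget:
print(find_n([900, 800, 700, 600], 2000))  # -> 2
# Dropping only the oldest turn still leaves 2100 tokens (over budget);
# dropping the two oldest leaves 700 + 600 = 1300, so two turns are kept.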
-# ------------------------------------------------------------------------ -import copy -from typing import List - -import torch -import torch.nn.functional as F -from torch import nn -from torchvision.ops.boxes import nms -from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast - -from groundingdino.util import box_ops, get_tokenlizer -from groundingdino.util.misc import ( - NestedTensor, - accuracy, - get_world_size, - interpolate, - inverse_sigmoid, - is_dist_avail_and_initialized, - nested_tensor_from_tensor_list, -) -from groundingdino.util.utils import get_phrases_from_posmap -from groundingdino.util.visualizer import COCOVisualizer -from groundingdino.util.vl_utils import create_positive_map_from_span - -from ..registry import MODULE_BUILD_FUNCS -from .backbone import build_backbone -from .bertwarper import ( - BertModelWarper, - generate_masks_with_special_tokens, - generate_masks_with_special_tokens_and_transfer_map, -) -from .transformer import build_transformer -from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss - - -class GroundingDINO(nn.Module): - """This is the Cross-Attention Detector module that performs object detection""" - - def __init__( - self, - backbone, - transformer, - num_queries, - aux_loss=False, - iter_update=False, - query_dim=2, - num_feature_levels=1, - nheads=8, - # two stage - two_stage_type="no", # ['no', 'standard'] - dec_pred_bbox_embed_share=True, - two_stage_class_embed_share=True, - two_stage_bbox_embed_share=True, - num_patterns=0, - dn_number=100, - dn_box_noise_scale=0.4, - dn_label_noise_ratio=0.5, - dn_labelbook_size=100, - text_encoder_type="bert-base-uncased", - sub_sentence_present=True, - max_text_len=256, - ): - """Initializes the model. - Parameters: - backbone: torch module of the backbone to be used. See backbone.py - transformer: torch module of the transformer architecture. See transformer.py - num_queries: number of object queries, ie detection slot. This is the maximal number of objects - Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. - aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
- """ - super().__init__() - self.num_queries = num_queries - self.transformer = transformer - self.hidden_dim = hidden_dim = transformer.d_model - self.num_feature_levels = num_feature_levels - self.nheads = nheads - self.max_text_len = 256 - self.sub_sentence_present = sub_sentence_present - - # setting query dim - self.query_dim = query_dim - assert query_dim == 4 - - # for dn training - self.num_patterns = num_patterns - self.dn_number = dn_number - self.dn_box_noise_scale = dn_box_noise_scale - self.dn_label_noise_ratio = dn_label_noise_ratio - self.dn_labelbook_size = dn_labelbook_size - - # bert - self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) - self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) - self.bert.pooler.dense.weight.requires_grad_(False) - self.bert.pooler.dense.bias.requires_grad_(False) - self.bert = BertModelWarper(bert_model=self.bert) - - self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) - nn.init.constant_(self.feat_map.bias.data, 0) - nn.init.xavier_uniform_(self.feat_map.weight.data) - # freeze - - # special tokens - self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) - - # prepare input projection layers - if num_feature_levels > 1: - num_backbone_outs = len(backbone.num_channels) - input_proj_list = [] - for _ in range(num_backbone_outs): - in_channels = backbone.num_channels[_] - input_proj_list.append( - nn.Sequential( - nn.Conv2d(in_channels, hidden_dim, kernel_size=1), - nn.GroupNorm(32, hidden_dim), - ) - ) - for _ in range(num_feature_levels - num_backbone_outs): - input_proj_list.append( - nn.Sequential( - nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), - nn.GroupNorm(32, hidden_dim), - ) - ) - in_channels = hidden_dim - self.input_proj = nn.ModuleList(input_proj_list) - else: - assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" - self.input_proj = nn.ModuleList( - [ - nn.Sequential( - nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), - nn.GroupNorm(32, hidden_dim), - ) - ] - ) - - self.backbone = backbone - self.aux_loss = aux_loss - self.box_pred_damping = box_pred_damping = None - - self.iter_update = iter_update - assert iter_update, "Why not iter_update?" 
- - # prepare pred layers - self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share - # prepare class & box embed - _class_embed = ContrastiveEmbed() - - _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) - nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) - nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) - - if dec_pred_bbox_embed_share: - box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] - else: - box_embed_layerlist = [ - copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) - ] - class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] - self.bbox_embed = nn.ModuleList(box_embed_layerlist) - self.class_embed = nn.ModuleList(class_embed_layerlist) - self.transformer.decoder.bbox_embed = self.bbox_embed - self.transformer.decoder.class_embed = self.class_embed - - # two stage - self.two_stage_type = two_stage_type - assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( - two_stage_type - ) - if two_stage_type != "no": - if two_stage_bbox_embed_share: - assert dec_pred_bbox_embed_share - self.transformer.enc_out_bbox_embed = _bbox_embed - else: - self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) - - if two_stage_class_embed_share: - assert dec_pred_bbox_embed_share - self.transformer.enc_out_class_embed = _class_embed - else: - self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) - - self.refpoint_embed = None - - self._reset_parameters() - - def _reset_parameters(self): - # init input_proj - for proj in self.input_proj: - nn.init.xavier_uniform_(proj[0].weight, gain=1) - nn.init.constant_(proj[0].bias, 0) - - def init_ref_points(self, use_num_queries): - self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) - - def forward(self, samples: NestedTensor, targets: List = None, **kw): - """The forward expects a NestedTensor, which consists of: - - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels - - It returns a dict with the following elements: - - "pred_logits": the classification logits (including no-object) for all queries. - Shape= [batch_size x num_queries x num_classes] - - "pred_boxes": The normalized boxes coordinates for all queries, represented as - (center_x, center_y, width, height). These values are normalized in [0, 1], - relative to the size of each individual image (disregarding possible padding). - See PostProcess for information on how to retrieve the unnormalized bounding box. - - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of - dictionnaries containing the two above keys for each decoder layer. 
- """ - if targets is None: - captions = kw["captions"] - else: - captions = [t["caption"] for t in targets] - len(captions) - - # encoder texts - tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to( - samples.device - ) - ( - text_self_attention_masks, - position_ids, - cate_to_token_mask_list, - ) = generate_masks_with_special_tokens_and_transfer_map( - tokenized, self.specical_tokens, self.tokenizer - ) - - if text_self_attention_masks.shape[1] > self.max_text_len: - text_self_attention_masks = text_self_attention_masks[ - :, : self.max_text_len, : self.max_text_len - ] - position_ids = position_ids[:, : self.max_text_len] - tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len] - tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len] - tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len] - - # extract text embeddings - if self.sub_sentence_present: - tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"} - tokenized_for_encoder["attention_mask"] = text_self_attention_masks - tokenized_for_encoder["position_ids"] = position_ids - else: - # import ipdb; ipdb.set_trace() - tokenized_for_encoder = tokenized - - bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768 - - encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model - text_token_mask = tokenized.attention_mask.bool() # bs, 195 - # text_token_mask: True for nomask, False for mask - # text_self_attention_masks: True for nomask, False for mask - - if encoded_text.shape[1] > self.max_text_len: - encoded_text = encoded_text[:, : self.max_text_len, :] - text_token_mask = text_token_mask[:, : self.max_text_len] - position_ids = position_ids[:, : self.max_text_len] - text_self_attention_masks = text_self_attention_masks[ - :, : self.max_text_len, : self.max_text_len - ] - - text_dict = { - "encoded_text": encoded_text, # bs, 195, d_model - "text_token_mask": text_token_mask, # bs, 195 - "position_ids": position_ids, # bs, 195 - "text_self_attention_masks": text_self_attention_masks, # bs, 195,195 - } - - # import ipdb; ipdb.set_trace() - - if isinstance(samples, (list, torch.Tensor)): - samples = nested_tensor_from_tensor_list(samples) - features, poss = self.backbone(samples) - - srcs = [] - masks = [] - for l, feat in enumerate(features): - src, mask = feat.decompose() - srcs.append(self.input_proj[l](src)) - masks.append(mask) - assert mask is not None - if self.num_feature_levels > len(srcs): - _len_srcs = len(srcs) - for l in range(_len_srcs, self.num_feature_levels): - if l == _len_srcs: - src = self.input_proj[l](features[-1].tensors) - else: - src = self.input_proj[l](srcs[-1]) - m = samples.mask - mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] - pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) - srcs.append(src) - masks.append(mask) - poss.append(pos_l) - - input_query_bbox = input_query_label = attn_mask = dn_meta = None - hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer( - srcs, masks, input_query_bbox, poss, input_query_label, attn_mask, text_dict - ) - - # deformable-detr-like anchor update - outputs_coord_list = [] - for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate( - zip(reference[:-1], self.bbox_embed, hs) - ): - layer_delta_unsig = layer_bbox_embed(layer_hs) - layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) - layer_outputs_unsig = 
layer_outputs_unsig.sigmoid() - outputs_coord_list.append(layer_outputs_unsig) - outputs_coord_list = torch.stack(outputs_coord_list) - - # output - outputs_class = torch.stack( - [ - layer_cls_embed(layer_hs, text_dict) - for layer_cls_embed, layer_hs in zip(self.class_embed, hs) - ] - ) - out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord_list[-1]} - - # # for intermediate outputs - # if self.aux_loss: - # out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) - - # # for encoder output - # if hs_enc is not None: - # # prepare intermediate outputs - # interm_coord = ref_enc[-1] - # interm_class = self.transformer.enc_out_class_embed(hs_enc[-1], text_dict) - # out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} - # out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} - - return out - - @torch.jit.unused - def _set_aux_loss(self, outputs_class, outputs_coord): - # this is a workaround to make torchscript happy, as torchscript - # doesn't support dictionary with non-homogeneous values, such - # as a dict having both a Tensor and a list. - return [ - {"pred_logits": a, "pred_boxes": b} - for a, b in zip(outputs_class[:-1], outputs_coord[:-1]) - ] - - -@MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino") -def build_groundingdino(args): - - backbone = build_backbone(args) - transformer = build_transformer(args) - - dn_labelbook_size = args.dn_labelbook_size - dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share - sub_sentence_present = args.sub_sentence_present - - model = GroundingDINO( - backbone, - transformer, - num_queries=args.num_queries, - aux_loss=True, - iter_update=True, - query_dim=4, - num_feature_levels=args.num_feature_levels, - nheads=args.nheads, - dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, - two_stage_type=args.two_stage_type, - two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, - two_stage_class_embed_share=args.two_stage_class_embed_share, - num_patterns=args.num_patterns, - dn_number=0, - dn_box_noise_scale=args.dn_box_noise_scale, - dn_label_noise_ratio=args.dn_label_noise_ratio, - dn_labelbook_size=dn_labelbook_size, - text_encoder_type=args.text_encoder_type, - sub_sentence_present=sub_sentence_present, - max_text_len=args.max_text_len, - ) - - return model diff --git a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/vdecoder/hifigan/models.py b/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/vdecoder/hifigan/models.py deleted file mode 100644 index bdc3fa2c3447f360472d94c2fad9bd74993f6410..0000000000000000000000000000000000000000 --- a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/vdecoder/hifigan/models.py +++ /dev/null @@ -1,500 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class 
ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = (f0 > self.voiced_threshold).type(torch.float32) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. 
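# --- Editor's sketch (self-contained; sample rate and amplitude assumed,
# 0.1 being SineGen's default sine_amp): the phase-accumulation trick that
# _f02sine implements below. Per-sample phase increments f0/sr (in turns)
# are cumulatively summed, and sin(2*pi*cumsum) gives a sine whose
# instantaneous frequency tracks f0:
import math
import torch

sr = 48000                                  # assumed sample rate
f0 = torch.full((1, 100, 1), 440.0)         # 440 Hz held for 100 samples
rad = (f0 / sr) % 1                         # per-step increment, in turns
sine = 0.1 * torch.sin(2 * math.pi * torch.cumsum(rad, dim=1))
# The modulo-1 wrap and the cumsum_shift bookkeeping below exist only to
# keep the running sum numerically small: subtracting an integer number of
# turns changes nothing, since sin(2*pi*(x - n)) == sin(2*pi*x).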
The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. - # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (torch.diff(tmp_over_one, dim=1)) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. - i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) - - # generate sine waveforms - sine_waves = self._f02sine(fn) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . 
for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - 
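# --- Editor's aside (illustrative config, not read from this file): the
# stride_f0 arithmetic in Generator.__init__ above aligns the audio-rate
# harmonic source with each upsampling stage. With upsample_rates =
# [8, 8, 2, 2] (a common HiFi-GAN configuration, 256x total), the product
# of the remaining rates at stage i is the factor by which the excitation
# must be strided down to match that stage's temporal resolution:
import numpy as np

upsample_rates = [8, 8, 2, 2]               # assumed, not from this file
for i in range(len(upsample_rates) - 1):
    stride_f0 = int(np.prod(upsample_rates[i + 1:]))
    print(i, stride_f0)                     # prints: 0 32 / 1 4 / 2 2
# The final stage already runs at audio rate, so it uses a plain 1x1 conv.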
self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 
41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/chinese_mandarin/numbers.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/chinese_mandarin/numbers.py deleted file mode 100644 index 4787ea61007656819eb57d52d5865b38c7afa915..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/chinese_mandarin/numbers.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Licensed under WTFPL or the Unlicense or CC0. -# This uses Python 3, but it's easy to port to Python 2 by changing -# strings to u'xx'. - -import itertools -import re - - -def _num2chinese(num: str, big=False, simp=True, o=False, twoalt=False) -> str: - """Convert numerical arabic numbers (0->9) to chinese hanzi numbers (〇 -> 九) - - Args: - num (str): arabic number to convert - big (bool, optional): use financial characters. Defaults to False. - simp (bool, optional): use simplified characters instead of tradictional characters. Defaults to True. - o (bool, optional): use 〇 for 'zero'. Defaults to False. - twoalt (bool, optional): use 两/兩 for 'two' when appropriate. Defaults to False. 
- - Raises: - ValueError: if number is more than 1e48 - ValueError: if 'e' exposent in number - - Returns: - str: converted number as hanzi characters - """ - - # check num first - nd = str(num) - if abs(float(nd)) >= 1e48: - raise ValueError("number out of range") - if "e" in nd: - raise ValueError("scientific notation is not supported") - c_symbol = "正负点" if simp else "正負點" - if o: # formal - twoalt = False - if big: - c_basic = "零壹贰叁肆伍陆柒捌玖" if simp else "零壹貳參肆伍陸柒捌玖" - c_unit1 = "拾佰仟" - c_twoalt = "贰" if simp else "貳" - else: - c_basic = "〇一二三四五六七八九" if o else "零一二三四五六七八九" - c_unit1 = "十百千" - if twoalt: - c_twoalt = "两" if simp else "兩" - else: - c_twoalt = "二" - c_unit2 = "万亿兆京垓秭穰沟涧正载" if simp else "萬億兆京垓秭穰溝澗正載" - revuniq = lambda l: "".join(k for k, g in itertools.groupby(reversed(l))) - nd = str(num) - result = [] - if nd[0] == "+": - result.append(c_symbol[0]) - elif nd[0] == "-": - result.append(c_symbol[1]) - if "." in nd: - integer, remainder = nd.lstrip("+-").split(".") - else: - integer, remainder = nd.lstrip("+-"), None - if int(integer): - splitted = [integer[max(i - 4, 0) : i] for i in range(len(integer), 0, -4)] - intresult = [] - for nu, unit in enumerate(splitted): - # special cases - if int(unit) == 0: # 0000 - intresult.append(c_basic[0]) - continue - if nu > 0 and int(unit) == 2: # 0002 - intresult.append(c_twoalt + c_unit2[nu - 1]) - continue - ulist = [] - unit = unit.zfill(4) - for nc, ch in enumerate(reversed(unit)): - if ch == "0": - if ulist: # ???0 - ulist.append(c_basic[0]) - elif nc == 0: - ulist.append(c_basic[int(ch)]) - elif nc == 1 and ch == "1" and unit[1] == "0": - # special case for tens - # edit the 'elif' if you don't like - # 十四, 三千零十四, 三千三百一十四 - ulist.append(c_unit1[0]) - elif nc > 1 and ch == "2": - ulist.append(c_twoalt + c_unit1[nc - 1]) - else: - ulist.append(c_basic[int(ch)] + c_unit1[nc - 1]) - ustr = revuniq(ulist) - if nu == 0: - intresult.append(ustr) - else: - intresult.append(ustr + c_unit2[nu - 1]) - result.append(revuniq(intresult).strip(c_basic[0])) - else: - result.append(c_basic[0]) - if remainder: - result.append(c_symbol[2]) - result.append("".join(c_basic[int(ch)] for ch in remainder)) - return "".join(result) - - -def _number_replace(match) -> str: - """function to apply in a match, transform all numbers in a match by chinese characters - - Args: - match (re.Match): numbers regex matches - - Returns: - str: replaced characters for the numbers - """ - match_str: str = match.group() - return _num2chinese(match_str) - - -def replace_numbers_to_characters_in_text(text: str) -> str: - """Replace all arabic numbers in a text by their equivalent in chinese characters (simplified) - - Args: - text (str): input text to transform - - Returns: - str: output text - """ - text = re.sub(r"[0-9]+", _number_replace, text) - return text diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/FunctionArguments.c b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/FunctionArguments.c deleted file mode 100644 index 8333d9366640fcfc5040a859b53535cdd55409c1..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/FunctionArguments.c +++ /dev/null @@ -1,352 +0,0 @@ -//////////////////// ArgTypeTest.proto //////////////////// - - -#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact) \ - ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 : \ - __Pyx__ArgTypeTest(obj, type, name, exact)) - -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /*proto*/ - -//////////////////// ArgTypeTest //////////////////// - -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) -{ - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - else if (exact) { - #if PY_MAJOR_VERSION == 2 - if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; - #endif - } - else { - if (likely(__Pyx_TypeCheck(obj, type))) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -//////////////////// RaiseArgTupleInvalid.proto //////////////////// - -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ - -//////////////////// RaiseArgTupleInvalid //////////////////// - -// __Pyx_RaiseArgtupleInvalid raises the correct exception when too -// many or too few positional arguments were found. This handles -// Py_ssize_t formatting correctly. - -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? 
"" : "s", num_found); -} - - -//////////////////// RaiseKeywordRequired.proto //////////////////// - -static void __Pyx_RaiseKeywordRequired(const char* func_name, PyObject* kw_name); /*proto*/ - -//////////////////// RaiseKeywordRequired //////////////////// - -static void __Pyx_RaiseKeywordRequired(const char* func_name, PyObject* kw_name) { - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() needs keyword-only argument %U", func_name, kw_name); - #else - "%s() needs keyword-only argument %s", func_name, - PyString_AS_STRING(kw_name)); - #endif -} - - -//////////////////// RaiseDoubleKeywords.proto //////////////////// - -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ - -//////////////////// RaiseDoubleKeywords //////////////////// - -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - - -//////////////////// RaiseMappingExpected.proto //////////////////// - -static void __Pyx_RaiseMappingExpectedError(PyObject* arg); /*proto*/ - -//////////////////// RaiseMappingExpected //////////////////// - -static void __Pyx_RaiseMappingExpectedError(PyObject* arg) { - PyErr_Format(PyExc_TypeError, "'%.200s' object is not a mapping", Py_TYPE(arg)->tp_name); -} - - -//////////////////// KeywordStringCheck.proto //////////////////// - -static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); /*proto*/ - -//////////////////// KeywordStringCheck //////////////////// - -// __Pyx_CheckKeywordStrings raises an error if non-string keywords -// were passed to a function, or if any keywords were passed to a -// function that does not accept them. - -static int __Pyx_CheckKeywordStrings( - PyObject *kwdict, - const char* function_name, - int kw_allowed) -{ - PyObject* key = 0; - Py_ssize_t pos = 0; -#if CYTHON_COMPILING_IN_PYPY - /* PyPy appears to check keywords at call time, not at unpacking time => not much to do here */ - if (!kw_allowed && PyDict_Next(kwdict, &pos, &key, 0)) - goto invalid_keyword; - return 1; -#else - while (PyDict_Next(kwdict, &pos, &key, 0)) { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_Check(key))) - #endif - if (unlikely(!PyUnicode_Check(key))) - goto invalid_keyword_type; - } - if ((!kw_allowed) && unlikely(key)) - goto invalid_keyword; - return 1; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - return 0; -#endif -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif - return 0; -} - - -//////////////////// ParseKeywords.proto //////////////////// - -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ - const char* function_name); /*proto*/ - -//////////////////// ParseKeywords //////////////////// -//@requires: RaiseDoubleKeywords - -// __Pyx_ParseOptionalKeywords copies the optional/unknown keyword -// arguments from the kwds dict into kwds2. 
If kwds2 is NULL, unknown -// keywords will raise an invalid keyword error. -// -// Three kinds of errors are checked: 1) non-string keywords, 2) -// unexpected keywords and 3) overlap with positional arguments. -// -// If num_posargs is greater 0, it denotes the number of positional -// arguments that were passed and that must therefore not appear -// amongst the keywords as well. -// -// This method does not check for required keyword arguments. - -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - // not found after positional args, check for duplicate - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = (**name == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - // In Py2, we may need to convert the argument name from str to unicode for comparison. - PyUnicode_Compare(**name, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - // not found after positional args, check for duplicate - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : - #endif - // need to convert argument name from bytes to unicode for comparison - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - - -//////////////////// MergeKeywords.proto //////////////////// - -static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping); /*proto*/ - -//////////////////// MergeKeywords //////////////////// -//@requires: RaiseDoubleKeywords -//@requires: Optimize.c::dict_iter - -static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping) { - PyObject *iter, *key = NULL, *value = NULL; - int source_is_dict, result; - Py_ssize_t orig_length, ppos = 0; - - iter = __Pyx_dict_iterator(source_mapping, 0, PYIDENT("items"), &orig_length, &source_is_dict); - if (unlikely(!iter)) { - // slow fallback: try converting to dict, then iterate - PyObject *args; - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - args = PyTuple_Pack(1, source_mapping); - if (likely(args)) { - PyObject *fallback = PyObject_Call((PyObject*)&PyDict_Type, args, NULL); - Py_DECREF(args); - if (likely(fallback)) { - iter = __Pyx_dict_iterator(fallback, 1, PYIDENT("items"), &orig_length, &source_is_dict); - Py_DECREF(fallback); - } - } - if (unlikely(!iter)) goto bad; - } - - while (1) { - result = __Pyx_dict_iter_next(iter, orig_length, &ppos, &key, &value, NULL, source_is_dict); - if (unlikely(result < 0)) goto bad; - if (!result) break; - - if (unlikely(PyDict_Contains(kwdict, key))) { - __Pyx_RaiseDoubleKeywordsError("function", key); - result = -1; - } else { - result = PyDict_SetItem(kwdict, key, value); - } - Py_DECREF(key); - Py_DECREF(value); - if (unlikely(result < 0)) goto bad; - } - Py_XDECREF(iter); - return 0; - -bad: - Py_XDECREF(iter); - return -1; -} diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/http.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/http.py deleted file mode 100644 index ca9dc54b215f7977970658250f23e3be137f1b3e..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/http.py +++ /dev/null @@ -1,70 +0,0 @@ -import http.server -import sys -from typing import Mapping, Tuple - -from . 
import __version__ -from .http_exceptions import HttpProcessingError as HttpProcessingError -from .http_parser import ( - HeadersParser as HeadersParser, - HttpParser as HttpParser, - HttpRequestParser as HttpRequestParser, - HttpResponseParser as HttpResponseParser, - RawRequestMessage as RawRequestMessage, - RawResponseMessage as RawResponseMessage, -) -from .http_websocket import ( - WS_CLOSED_MESSAGE as WS_CLOSED_MESSAGE, - WS_CLOSING_MESSAGE as WS_CLOSING_MESSAGE, - WS_KEY as WS_KEY, - WebSocketError as WebSocketError, - WebSocketReader as WebSocketReader, - WebSocketWriter as WebSocketWriter, - WSCloseCode as WSCloseCode, - WSMessage as WSMessage, - WSMsgType as WSMsgType, - ws_ext_gen as ws_ext_gen, - ws_ext_parse as ws_ext_parse, -) -from .http_writer import ( - HttpVersion as HttpVersion, - HttpVersion10 as HttpVersion10, - HttpVersion11 as HttpVersion11, - StreamWriter as StreamWriter, -) - -__all__ = ( - "HttpProcessingError", - "RESPONSES", - "SERVER_SOFTWARE", - # .http_writer - "StreamWriter", - "HttpVersion", - "HttpVersion10", - "HttpVersion11", - # .http_parser - "HeadersParser", - "HttpParser", - "HttpRequestParser", - "HttpResponseParser", - "RawRequestMessage", - "RawResponseMessage", - # .http_websocket - "WS_CLOSED_MESSAGE", - "WS_CLOSING_MESSAGE", - "WS_KEY", - "WebSocketReader", - "WebSocketWriter", - "ws_ext_gen", - "ws_ext_parse", - "WSMessage", - "WebSocketError", - "WSMsgType", - "WSCloseCode", -) - - -SERVER_SOFTWARE: str = "Python/{0[0]}.{0[1]} aiohttp/{1}".format( - sys.version_info, __version__ -) - -RESPONSES: Mapping[int, Tuple[str, str]] = http.server.BaseHTTPRequestHandler.responses diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/http_parser.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/http_parser.py deleted file mode 100644 index 5a66ce4b9eec19777800ddc3c0f5e66b2270f9d3..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/http_parser.py +++ /dev/null @@ -1,969 +0,0 @@ -import abc -import asyncio -import collections -import re -import string -import zlib -from contextlib import suppress -from enum import IntEnum -from typing import ( - Any, - Generic, - List, - NamedTuple, - Optional, - Pattern, - Set, - Tuple, - Type, - TypeVar, - Union, - cast, -) - -from multidict import CIMultiDict, CIMultiDictProxy, istr -from yarl import URL - -from . import hdrs -from .base_protocol import BaseProtocol -from .helpers import NO_EXTENSIONS, BaseTimerContext -from .http_exceptions import ( - BadHttpMessage, - BadStatusLine, - ContentEncodingError, - ContentLengthError, - InvalidHeader, - LineTooLong, - TransferEncodingError, -) -from .http_writer import HttpVersion, HttpVersion10 -from .log import internal_logger -from .streams import EMPTY_PAYLOAD, StreamReader -from .typedefs import Final, RawHeaders - -try: - import brotli - - HAS_BROTLI = True -except ImportError: # pragma: no cover - HAS_BROTLI = False - - -__all__ = ( - "HeadersParser", - "HttpParser", - "HttpRequestParser", - "HttpResponseParser", - "RawRequestMessage", - "RawResponseMessage", -) - -ASCIISET: Final[Set[str]] = set(string.printable) - -# See https://tools.ietf.org/html/rfc7230#section-3.1.1 -# and https://tools.ietf.org/html/rfc7230#appendix-B -# -# method = token -# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." 
/ -# "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA -# token = 1*tchar -METHRE: Final[Pattern[str]] = re.compile(r"[!#$%&'*+\-.^_`|~0-9A-Za-z]+") -VERSRE: Final[Pattern[str]] = re.compile(r"HTTP/(\d+).(\d+)") -HDRRE: Final[Pattern[bytes]] = re.compile(rb"[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]") - - -class RawRequestMessage(NamedTuple): - method: str - path: str - version: HttpVersion - headers: "CIMultiDictProxy[str]" - raw_headers: RawHeaders - should_close: bool - compression: Optional[str] - upgrade: bool - chunked: bool - url: URL - - -RawResponseMessage = collections.namedtuple( - "RawResponseMessage", - [ - "version", - "code", - "reason", - "headers", - "raw_headers", - "should_close", - "compression", - "upgrade", - "chunked", - ], -) - - -_MsgT = TypeVar("_MsgT", RawRequestMessage, RawResponseMessage) - - -class ParseState(IntEnum): - - PARSE_NONE = 0 - PARSE_LENGTH = 1 - PARSE_CHUNKED = 2 - PARSE_UNTIL_EOF = 3 - - -class ChunkState(IntEnum): - PARSE_CHUNKED_SIZE = 0 - PARSE_CHUNKED_CHUNK = 1 - PARSE_CHUNKED_CHUNK_EOF = 2 - PARSE_MAYBE_TRAILERS = 3 - PARSE_TRAILERS = 4 - - -class HeadersParser: - def __init__( - self, - max_line_size: int = 8190, - max_headers: int = 32768, - max_field_size: int = 8190, - ) -> None: - self.max_line_size = max_line_size - self.max_headers = max_headers - self.max_field_size = max_field_size - - def parse_headers( - self, lines: List[bytes] - ) -> Tuple["CIMultiDictProxy[str]", RawHeaders]: - headers: CIMultiDict[str] = CIMultiDict() - raw_headers = [] - - lines_idx = 1 - line = lines[1] - line_count = len(lines) - - while line: - # Parse initial header name : value pair. - try: - bname, bvalue = line.split(b":", 1) - except ValueError: - raise InvalidHeader(line) from None - - bname = bname.strip(b" \t") - bvalue = bvalue.lstrip() - if HDRRE.search(bname): - raise InvalidHeader(bname) - if len(bname) > self.max_field_size: - raise LineTooLong( - "request header name {}".format( - bname.decode("utf8", "xmlcharrefreplace") - ), - str(self.max_field_size), - str(len(bname)), - ) - - header_length = len(bvalue) - - # next line - lines_idx += 1 - line = lines[lines_idx] - - # consume continuation lines - continuation = line and line[0] in (32, 9) # (' ', '\t') - - if continuation: - bvalue_lst = [bvalue] - while continuation: - header_length += len(line) - if header_length > self.max_field_size: - raise LineTooLong( - "request header field {}".format( - bname.decode("utf8", "xmlcharrefreplace") - ), - str(self.max_field_size), - str(header_length), - ) - bvalue_lst.append(line) - - # next line - lines_idx += 1 - if lines_idx < line_count: - line = lines[lines_idx] - if line: - continuation = line[0] in (32, 9) # (' ', '\t') - else: - line = b"" - break - bvalue = b"".join(bvalue_lst) - else: - if header_length > self.max_field_size: - raise LineTooLong( - "request header field {}".format( - bname.decode("utf8", "xmlcharrefreplace") - ), - str(self.max_field_size), - str(header_length), - ) - - bvalue = bvalue.strip() - name = bname.decode("utf-8", "surrogateescape") - value = bvalue.decode("utf-8", "surrogateescape") - - headers.add(name, value) - raw_headers.append((bname, bvalue)) - - return (CIMultiDictProxy(headers), tuple(raw_headers)) - - -class HttpParser(abc.ABC, Generic[_MsgT]): - def __init__( - self, - protocol: Optional[BaseProtocol] = None, - loop: Optional[asyncio.AbstractEventLoop] = None, - limit: int = 2**16, - max_line_size: int = 8190, - max_headers: int = 32768, - max_field_size: int = 8190, - timer: Optional[BaseTimerContext] = None, 
- code: Optional[int] = None, - method: Optional[str] = None, - readall: bool = False, - payload_exception: Optional[Type[BaseException]] = None, - response_with_body: bool = True, - read_until_eof: bool = False, - auto_decompress: bool = True, - ) -> None: - self.protocol = protocol - self.loop = loop - self.max_line_size = max_line_size - self.max_headers = max_headers - self.max_field_size = max_field_size - self.timer = timer - self.code = code - self.method = method - self.readall = readall - self.payload_exception = payload_exception - self.response_with_body = response_with_body - self.read_until_eof = read_until_eof - - self._lines: List[bytes] = [] - self._tail = b"" - self._upgraded = False - self._payload = None - self._payload_parser: Optional[HttpPayloadParser] = None - self._auto_decompress = auto_decompress - self._limit = limit - self._headers_parser = HeadersParser(max_line_size, max_headers, max_field_size) - - @abc.abstractmethod - def parse_message(self, lines: List[bytes]) -> _MsgT: - pass - - def feed_eof(self) -> Optional[_MsgT]: - if self._payload_parser is not None: - self._payload_parser.feed_eof() - self._payload_parser = None - else: - # try to extract partial message - if self._tail: - self._lines.append(self._tail) - - if self._lines: - if self._lines[-1] != "\r\n": - self._lines.append(b"") - with suppress(Exception): - return self.parse_message(self._lines) - return None - - def feed_data( - self, - data: bytes, - SEP: bytes = b"\r\n", - EMPTY: bytes = b"", - CONTENT_LENGTH: istr = hdrs.CONTENT_LENGTH, - METH_CONNECT: str = hdrs.METH_CONNECT, - SEC_WEBSOCKET_KEY1: istr = hdrs.SEC_WEBSOCKET_KEY1, - ) -> Tuple[List[Tuple[_MsgT, StreamReader]], bool, bytes]: - - messages = [] - - if self._tail: - data, self._tail = self._tail + data, b"" - - data_len = len(data) - start_pos = 0 - loop = self.loop - - while start_pos < data_len: - - # read HTTP message (request/response line + headers), \r\n\r\n - # and split by lines - if self._payload_parser is None and not self._upgraded: - pos = data.find(SEP, start_pos) - # consume \r\n - if pos == start_pos and not self._lines: - start_pos = pos + 2 - continue - - if pos >= start_pos: - # line found - self._lines.append(data[start_pos:pos]) - start_pos = pos + 2 - - # \r\n\r\n found - if self._lines[-1] == EMPTY: - try: - msg: _MsgT = self.parse_message(self._lines) - finally: - self._lines.clear() - - def get_content_length() -> Optional[int]: - # payload length - length_hdr = msg.headers.get(CONTENT_LENGTH) - if length_hdr is None: - return None - - try: - length = int(length_hdr) - except ValueError: - raise InvalidHeader(CONTENT_LENGTH) - - if length < 0: - raise InvalidHeader(CONTENT_LENGTH) - - return length - - length = get_content_length() - # do not support old websocket spec - if SEC_WEBSOCKET_KEY1 in msg.headers: - raise InvalidHeader(SEC_WEBSOCKET_KEY1) - - self._upgraded = msg.upgrade - - method = getattr(msg, "method", self.method) - - assert self.protocol is not None - # calculate payload - if ( - (length is not None and length > 0) - or msg.chunked - and not msg.upgrade - ): - payload = StreamReader( - self.protocol, - timer=self.timer, - loop=loop, - limit=self._limit, - ) - payload_parser = HttpPayloadParser( - payload, - length=length, - chunked=msg.chunked, - method=method, - compression=msg.compression, - code=self.code, - readall=self.readall, - response_with_body=self.response_with_body, - auto_decompress=self._auto_decompress, - ) - if not payload_parser.done: - self._payload_parser = payload_parser 
- elif method == METH_CONNECT: - assert isinstance(msg, RawRequestMessage) - payload = StreamReader( - self.protocol, - timer=self.timer, - loop=loop, - limit=self._limit, - ) - self._upgraded = True - self._payload_parser = HttpPayloadParser( - payload, - method=msg.method, - compression=msg.compression, - readall=True, - auto_decompress=self._auto_decompress, - ) - else: - if ( - getattr(msg, "code", 100) >= 199 - and length is None - and self.read_until_eof - ): - payload = StreamReader( - self.protocol, - timer=self.timer, - loop=loop, - limit=self._limit, - ) - payload_parser = HttpPayloadParser( - payload, - length=length, - chunked=msg.chunked, - method=method, - compression=msg.compression, - code=self.code, - readall=True, - response_with_body=self.response_with_body, - auto_decompress=self._auto_decompress, - ) - if not payload_parser.done: - self._payload_parser = payload_parser - else: - payload = EMPTY_PAYLOAD - - messages.append((msg, payload)) - else: - self._tail = data[start_pos:] - data = EMPTY - break - - # no parser, just store - elif self._payload_parser is None and self._upgraded: - assert not self._lines - break - - # feed payload - elif data and start_pos < data_len: - assert not self._lines - assert self._payload_parser is not None - try: - eof, data = self._payload_parser.feed_data(data[start_pos:]) - except BaseException as exc: - if self.payload_exception is not None: - self._payload_parser.payload.set_exception( - self.payload_exception(str(exc)) - ) - else: - self._payload_parser.payload.set_exception(exc) - - eof = True - data = b"" - - if eof: - start_pos = 0 - data_len = len(data) - self._payload_parser = None - continue - else: - break - - if data and start_pos < data_len: - data = data[start_pos:] - else: - data = EMPTY - - return messages, self._upgraded, data - - def parse_headers( - self, lines: List[bytes] - ) -> Tuple[ - "CIMultiDictProxy[str]", RawHeaders, Optional[bool], Optional[str], bool, bool - ]: - """Parses RFC 5322 headers from a stream. - - Line continuations are supported. Returns list of header name - and value pairs. Header name is in upper case. - """ - headers, raw_headers = self._headers_parser.parse_headers(lines) - close_conn = None - encoding = None - upgrade = False - chunked = False - - # keep-alive - conn = headers.get(hdrs.CONNECTION) - if conn: - v = conn.lower() - if v == "close": - close_conn = True - elif v == "keep-alive": - close_conn = False - elif v == "upgrade": - upgrade = True - - # encoding - enc = headers.get(hdrs.CONTENT_ENCODING) - if enc: - enc = enc.lower() - if enc in ("gzip", "deflate", "br"): - encoding = enc - - # chunking - te = headers.get(hdrs.TRANSFER_ENCODING) - if te is not None: - if "chunked" == te.lower(): - chunked = True - else: - raise BadHttpMessage("Request has invalid `Transfer-Encoding`") - - if hdrs.CONTENT_LENGTH in headers: - raise BadHttpMessage( - "Content-Length can't be present with Transfer-Encoding", - ) - - return (headers, raw_headers, close_conn, encoding, upgrade, chunked) - - def set_upgraded(self, val: bool) -> None: - """Set connection upgraded (to websocket) mode. - - :param bool val: new state. - """ - self._upgraded = val - - -class HttpRequestParser(HttpParser[RawRequestMessage]): - """Read request status line. - - Exception .http_exceptions.BadStatusLine - could be raised in case of any errors in status line. - Returns RawRequestMessage. 
- """ - - def parse_message(self, lines: List[bytes]) -> RawRequestMessage: - # request line - line = lines[0].decode("utf-8", "surrogateescape") - try: - method, path, version = line.split(None, 2) - except ValueError: - raise BadStatusLine(line) from None - - if len(path) > self.max_line_size: - raise LineTooLong( - "Status line is too long", str(self.max_line_size), str(len(path)) - ) - - # method - if not METHRE.match(method): - raise BadStatusLine(method) - - # version - try: - if version.startswith("HTTP/"): - n1, n2 = version[5:].split(".", 1) - version_o = HttpVersion(int(n1), int(n2)) - else: - raise BadStatusLine(version) - except Exception: - raise BadStatusLine(version) - - if method == "CONNECT": - # authority-form, - # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.3 - url = URL.build(authority=path, encoded=True) - elif path.startswith("/"): - # origin-form, - # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.1 - path_part, _hash_separator, url_fragment = path.partition("#") - path_part, _question_mark_separator, qs_part = path_part.partition("?") - - # NOTE: `yarl.URL.build()` is used to mimic what the Cython-based - # NOTE: parser does, otherwise it results into the same - # NOTE: HTTP Request-Line input producing different - # NOTE: `yarl.URL()` objects - url = URL.build( - path=path_part, - query_string=qs_part, - fragment=url_fragment, - encoded=True, - ) - else: - # absolute-form for proxy maybe, - # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.2 - url = URL(path, encoded=True) - - # read headers - ( - headers, - raw_headers, - close, - compression, - upgrade, - chunked, - ) = self.parse_headers(lines) - - if close is None: # then the headers weren't set in the request - if version_o <= HttpVersion10: # HTTP 1.0 must asks to not close - close = True - else: # HTTP 1.1 must ask to close. - close = False - - return RawRequestMessage( - method, - path, - version_o, - headers, - raw_headers, - close, - compression, - upgrade, - chunked, - url, - ) - - -class HttpResponseParser(HttpParser[RawResponseMessage]): - """Read response status line and headers. - - BadStatusLine could be raised in case of any errors in status line. - Returns RawResponseMessage. 
- """ - - def parse_message(self, lines: List[bytes]) -> RawResponseMessage: - line = lines[0].decode("utf-8", "surrogateescape") - try: - version, status = line.split(None, 1) - except ValueError: - raise BadStatusLine(line) from None - - try: - status, reason = status.split(None, 1) - except ValueError: - reason = "" - - if len(reason) > self.max_line_size: - raise LineTooLong( - "Status line is too long", str(self.max_line_size), str(len(reason)) - ) - - # version - match = VERSRE.match(version) - if match is None: - raise BadStatusLine(line) - version_o = HttpVersion(int(match.group(1)), int(match.group(2))) - - # The status code is a three-digit number - try: - status_i = int(status) - except ValueError: - raise BadStatusLine(line) from None - - if status_i > 999: - raise BadStatusLine(line) - - # read headers - ( - headers, - raw_headers, - close, - compression, - upgrade, - chunked, - ) = self.parse_headers(lines) - - if close is None: - close = version_o <= HttpVersion10 - - return RawResponseMessage( - version_o, - status_i, - reason.strip(), - headers, - raw_headers, - close, - compression, - upgrade, - chunked, - ) - - -class HttpPayloadParser: - def __init__( - self, - payload: StreamReader, - length: Optional[int] = None, - chunked: bool = False, - compression: Optional[str] = None, - code: Optional[int] = None, - method: Optional[str] = None, - readall: bool = False, - response_with_body: bool = True, - auto_decompress: bool = True, - ) -> None: - self._length = 0 - self._type = ParseState.PARSE_NONE - self._chunk = ChunkState.PARSE_CHUNKED_SIZE - self._chunk_size = 0 - self._chunk_tail = b"" - self._auto_decompress = auto_decompress - self.done = False - - # payload decompression wrapper - if response_with_body and compression and self._auto_decompress: - real_payload: Union[StreamReader, DeflateBuffer] = DeflateBuffer( - payload, compression - ) - else: - real_payload = payload - - # payload parser - if not response_with_body: - # don't parse payload if it's not expected to be received - self._type = ParseState.PARSE_NONE - real_payload.feed_eof() - self.done = True - - elif chunked: - self._type = ParseState.PARSE_CHUNKED - elif length is not None: - self._type = ParseState.PARSE_LENGTH - self._length = length - if self._length == 0: - real_payload.feed_eof() - self.done = True - else: - if readall and code != 204: - self._type = ParseState.PARSE_UNTIL_EOF - elif method in ("PUT", "POST"): - internal_logger.warning( # pragma: no cover - "Content-Length or Transfer-Encoding header is required" - ) - self._type = ParseState.PARSE_NONE - real_payload.feed_eof() - self.done = True - - self.payload = real_payload - - def feed_eof(self) -> None: - if self._type == ParseState.PARSE_UNTIL_EOF: - self.payload.feed_eof() - elif self._type == ParseState.PARSE_LENGTH: - raise ContentLengthError( - "Not enough data for satisfy content length header." - ) - elif self._type == ParseState.PARSE_CHUNKED: - raise TransferEncodingError( - "Not enough data for satisfy transfer length header." 
- ) - - def feed_data( - self, chunk: bytes, SEP: bytes = b"\r\n", CHUNK_EXT: bytes = b";" - ) -> Tuple[bool, bytes]: - # Read specified amount of bytes - if self._type == ParseState.PARSE_LENGTH: - required = self._length - chunk_len = len(chunk) - - if required >= chunk_len: - self._length = required - chunk_len - self.payload.feed_data(chunk, chunk_len) - if self._length == 0: - self.payload.feed_eof() - return True, b"" - else: - self._length = 0 - self.payload.feed_data(chunk[:required], required) - self.payload.feed_eof() - return True, chunk[required:] - - # Chunked transfer encoding parser - elif self._type == ParseState.PARSE_CHUNKED: - if self._chunk_tail: - chunk = self._chunk_tail + chunk - self._chunk_tail = b"" - - while chunk: - - # read next chunk size - if self._chunk == ChunkState.PARSE_CHUNKED_SIZE: - pos = chunk.find(SEP) - if pos >= 0: - i = chunk.find(CHUNK_EXT, 0, pos) - if i >= 0: - size_b = chunk[:i] # strip chunk-extensions - else: - size_b = chunk[:pos] - - try: - size = int(bytes(size_b), 16) - except ValueError: - exc = TransferEncodingError( - chunk[:pos].decode("ascii", "surrogateescape") - ) - self.payload.set_exception(exc) - raise exc from None - - chunk = chunk[pos + 2 :] - if size == 0: # eof marker - self._chunk = ChunkState.PARSE_MAYBE_TRAILERS - else: - self._chunk = ChunkState.PARSE_CHUNKED_CHUNK - self._chunk_size = size - self.payload.begin_http_chunk_receiving() - else: - self._chunk_tail = chunk - return False, b"" - - # read chunk and feed buffer - if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK: - required = self._chunk_size - chunk_len = len(chunk) - - if required > chunk_len: - self._chunk_size = required - chunk_len - self.payload.feed_data(chunk, chunk_len) - return False, b"" - else: - self._chunk_size = 0 - self.payload.feed_data(chunk[:required], required) - chunk = chunk[required:] - self._chunk = ChunkState.PARSE_CHUNKED_CHUNK_EOF - self.payload.end_http_chunk_receiving() - - # toss the CRLF at the end of the chunk - if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK_EOF: - if chunk[:2] == SEP: - chunk = chunk[2:] - self._chunk = ChunkState.PARSE_CHUNKED_SIZE - else: - self._chunk_tail = chunk - return False, b"" - - # if stream does not contain trailer, after 0\r\n - # we should get another \r\n otherwise - # trailers needs to be skiped until \r\n\r\n - if self._chunk == ChunkState.PARSE_MAYBE_TRAILERS: - head = chunk[:2] - if head == SEP: - # end of stream - self.payload.feed_eof() - return True, chunk[2:] - # Both CR and LF, or only LF may not be received yet. It is - # expected that CRLF or LF will be shown at the very first - # byte next time, otherwise trailers should come. The last - # CRLF which marks the end of response might not be - # contained in the same TCP segment which delivered the - # size indicator. 
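-                # Illustration with hypothetical input: the terminating
-                # b"0\r\n\r\n" may arrive as b"0\r\n" + b"\r\n", or even as
-                # b"0\r\n" + b"\r" + b"\n" across segments. The empty-head
-                # check below simply waits for more data, and the lone-CR
-                # check parks b"\r" in _chunk_tail, so neither partial
-                # delivery is misread as the start of a trailer line.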
- if not head: - return False, b"" - if head == SEP[:1]: - self._chunk_tail = head - return False, b"" - self._chunk = ChunkState.PARSE_TRAILERS - - # read and discard trailer up to the CRLF terminator - if self._chunk == ChunkState.PARSE_TRAILERS: - pos = chunk.find(SEP) - if pos >= 0: - chunk = chunk[pos + 2 :] - self._chunk = ChunkState.PARSE_MAYBE_TRAILERS - else: - self._chunk_tail = chunk - return False, b"" - - # Read all bytes until eof - elif self._type == ParseState.PARSE_UNTIL_EOF: - self.payload.feed_data(chunk, len(chunk)) - - return False, b"" - - -class DeflateBuffer: - """DeflateStream decompress stream and feed data into specified stream.""" - - decompressor: Any - - def __init__(self, out: StreamReader, encoding: Optional[str]) -> None: - self.out = out - self.size = 0 - self.encoding = encoding - self._started_decoding = False - - if encoding == "br": - if not HAS_BROTLI: # pragma: no cover - raise ContentEncodingError( - "Can not decode content-encoding: brotli (br). " - "Please install `Brotli`" - ) - - class BrotliDecoder: - # Supports both 'brotlipy' and 'Brotli' packages - # since they share an import name. The top branches - # are for 'brotlipy' and bottom branches for 'Brotli' - def __init__(self) -> None: - self._obj = brotli.Decompressor() - - def decompress(self, data: bytes) -> bytes: - if hasattr(self._obj, "decompress"): - return cast(bytes, self._obj.decompress(data)) - return cast(bytes, self._obj.process(data)) - - def flush(self) -> bytes: - if hasattr(self._obj, "flush"): - return cast(bytes, self._obj.flush()) - return b"" - - self.decompressor = BrotliDecoder() - else: - zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else zlib.MAX_WBITS - self.decompressor = zlib.decompressobj(wbits=zlib_mode) - - def set_exception(self, exc: BaseException) -> None: - self.out.set_exception(exc) - - def feed_data(self, chunk: bytes, size: int) -> None: - if not size: - return - - self.size += size - - # RFC1950 - # bits 0..3 = CM = 0b1000 = 8 = "deflate" - # bits 4..7 = CINFO = 1..7 = windows size. - if ( - not self._started_decoding - and self.encoding == "deflate" - and chunk[0] & 0xF != 8 - ): - # Change the decoder to decompress incorrectly compressed data - # Actually we should issue a warning about non-RFC-compliant data. 
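-            # Example: a spec-compliant zlib stream usually opens with 0x78
-            # (CM = 8, CINFO = 7), so chunk[0] & 0xF == 8 holds. A server
-            # sending raw deflate has no RFC 1950 header, fails the check
-            # above, and needs a decoder built with wbits=-zlib.MAX_WBITS.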
- self.decompressor = zlib.decompressobj(wbits=-zlib.MAX_WBITS) - - try: - chunk = self.decompressor.decompress(chunk) - except Exception: - raise ContentEncodingError( - "Can not decode content-encoding: %s" % self.encoding - ) - - self._started_decoding = True - - if chunk: - self.out.feed_data(chunk, len(chunk)) - - def feed_eof(self) -> None: - chunk = self.decompressor.flush() - - if chunk or self.size > 0: - self.out.feed_data(chunk, len(chunk)) - if self.encoding == "deflate" and not self.decompressor.eof: - raise ContentEncodingError("deflate") - - self.out.feed_eof() - - def begin_http_chunk_receiving(self) -> None: - self.out.begin_http_chunk_receiving() - - def end_http_chunk_receiving(self) -> None: - self.out.end_http_chunk_receiving() - - -HttpRequestParserPy = HttpRequestParser -HttpResponseParserPy = HttpResponseParser -RawRequestMessagePy = RawRequestMessage -RawResponseMessagePy = RawResponseMessage - -try: - if not NO_EXTENSIONS: - from ._http_parser import ( # type: ignore[import,no-redef] - HttpRequestParser, - HttpResponseParser, - RawRequestMessage, - RawResponseMessage, - ) - - HttpRequestParserC = HttpRequestParser - HttpResponseParserC = HttpResponseParser - RawRequestMessageC = RawRequestMessage - RawResponseMessageC = RawResponseMessage -except ImportError: # pragma: no cover - pass diff --git a/spaces/arxnov/anotest/api.py b/spaces/arxnov/anotest/api.py deleted file mode 100644 index 14bba3b1f13565a0d63e2f1b4440f6d43e97210c..0000000000000000000000000000000000000000 --- a/spaces/arxnov/anotest/api.py +++ /dev/null @@ -1,304 +0,0 @@ -import os -import requests -import json -from io import BytesIO - -from fastapi import FastAPI, Response, File -from fastapi.staticfiles import StaticFiles -from fastapi.responses import FileResponse, StreamingResponse -from pydantic import BaseModel, Field -from fastapi.middleware.cors import CORSMiddleware - -import argparse -import re -import tempfile -#import logging - -#logging.getLogger('numba').setLevel(logging.WARNING) -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -#import gradio as gr -#import gradio.utils as gr_utils -#import gradio.processing_utils as gr_processing_utils -import ONNXVITS_infer -import models -from text import text_to_sequence, _clean_text -from text.symbols import symbols -from mel_processing import spectrogram_torch -import psutil -from datetime import datetime - -from pydub import AudioSegment - - -language_marks = { - "Japanese": "", - "日本語": "[JA]", - "简体中文": "[ZH]", - "English": "[EN]", - "Mix": "", -} - -# limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces -limitation = False - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, language, speed, is_symbol): - if limitation: - text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) - max_len = 150 - if is_symbol: - max_len *= 3 - if text_len > max_len: - return "Error: Text is too long", None - if language is not None: - text = language_marks[language] + text + language_marks[language] - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_symbol) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - sid = LongTensor([speaker_id]) - audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", 
(hps.data.sampling_rate, audio) - - return tts_fn - - -def create_vc_fn(model, hps, speaker_ids): - def vc_fn(original_speaker, target_speaker, input_audio): - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - original_speaker_id = speaker_ids[original_speaker] - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != hps.data.sampling_rate: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate) - with no_grad(): - y = torch.FloatTensor(audio) - y = y.unsqueeze(0) - spec = spectrogram_torch(y, hps.data.filter_length, - hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, - center=False) - spec_lengths = LongTensor([spec.size(-1)]) - sid_src = LongTensor([original_speaker_id]) - sid_tgt = LongTensor([target_speaker_id]) - audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][ - 0, 0].data.cpu().float().numpy() - del y, spec, spec_lengths, sid_src, sid_tgt - return "Success", (hps.data.sampling_rate, audio) - - return vc_fn - - -def get_text(text, hps, is_symbol): - text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm - - -def create_to_symbol_fn(hps): - def to_symbol_fn(is_symbol_input, input_text, temp_text): - return (_clean_text(input_text, hps.data.text_cleaners), input_text) if is_symbol_input \ - else (temp_text, temp_text) - - return to_symbol_fn - -def to_16bit_audio(audio : np.ndarray): - audio_samples = np.array(audio * (2**15 - 1), dtype=np.int16) - - return audio_samples - -models_tts = [] -models_vc = [] -models_info = [ - { - "title": "Trilingual", - "languages": ['日本語', '简体中文', 'English', 'Mix'], - "description": """ - This model is trained on a mix up of Umamusume, Genshin Impact, Sanoba Witch & VCTK voice data to learn multilanguage. 
- All characters can speak English, Chinese & Japanese.\n\n - To mix multiple languages in a single sentence, wrap the corresponding part with language tokens - ([JA] for Japanese, [ZH] for Chinese, [EN] for English), as shown in the examples.\n\n - 这个模型在赛马娘,原神,魔女的夜宴以及VCTK数据集上混合训练以学习多种语言。 - 所有角色均可说中日英三语。\n\n - 若需要在同一个句子中混合多种语言,使用相应的语言标记包裹句子。 - (日语用[JA], 中文用[ZH], 英文用[EN]),参考Examples中的示例。 - """, - "model_path": "./pretrained_models/G_trilingual.pth", - "config_path": "./configs/uma_trilingual.json", - "examples": [['你好,训练员先生,很高兴见到你。', '草上飞 Grass Wonder (Umamusume Pretty Derby)', '简体中文', 1, False], - ['To be honest, I have no idea what to say as examples.', '派蒙 Paimon (Genshin Impact)', 'English', - 1, False], - ['授業中に出しだら,学校生活終わるですわ。', '綾地 寧々 Ayachi Nene (Sanoba Witch)', '日本語', 1, False], - ['[JA]こんにちわ。[JA][ZH]你好![ZH][EN]Hello![EN]', '綾地 寧々 Ayachi Nene (Sanoba Witch)', 'Mix', 1, False]], - "onnx_dir": "./ONNX_net/G_trilingual/" - }, - { - "title": "Japanese", - "languages": ["Japanese"], - "description": """ - This model contains 87 characters from Umamusume: Pretty Derby, Japanese only.\n\n - 这个模型包含赛马娘的所有87名角色,只能合成日语。 - """, - "model_path": "./pretrained_models/G_jp.pth", - "config_path": "./configs/uma87.json", - "examples": [['お疲れ様です,トレーナーさん。', '无声铃鹿 Silence Suzuka (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['張り切っていこう!', '北部玄驹 Kitasan Black (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['何でこんなに慣れでんのよ,私のほが先に好きだっだのに。', '草上飞 Grass Wonder (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['授業中に出しだら,学校生活終わるですわ。', '目白麦昆 Mejiro Mcqueen (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['お帰りなさい,お兄様!', '米浴 Rice Shower (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['私の処女をもらっでください!', '米浴 Rice Shower (Umamusume Pretty Derby)', 'Japanese', 1, False]], - "onnx_dir": "./ONNX_net/G_jp/" - }, -] - - -#parser = argparse.ArgumentParser() -#parser.add_argument("--share", action="store_true", default=False, help="share gradio app") -#args = parser.parse_args() -print('info') -for info in models_info: - print(info) - name = info['title'] - lang = info['languages'] - examples = info['examples'] - config_path = info['config_path'] - model_path = info['model_path'] - description = info['description'] - onnx_dir = info["onnx_dir"] - hps = utils.get_hparams_from_file(config_path) - model = ONNXVITS_infer.SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - ONNX_dir=onnx_dir, - **hps.model) - utils.load_checkpoint(model_path, model, None) - model.eval() - speaker_ids = hps.speakers - speakers = list(hps.speakers.keys()) - models_tts.append((name, description, speakers, lang, examples, - hps.symbols, create_tts_fn(model, hps, speaker_ids), - create_to_symbol_fn(hps))) - models_vc.append((name, description, speakers, create_vc_fn(model, hps, speaker_ids))) - -print('app start') -app = FastAPI() -os.makedirs("outputs", exist_ok=True) - -app.mount("/outputs", StaticFiles(directory="outputs", html=True), name="outputs") -origins = [ - "http://localhost:8000", - "http://localhost", -] - -app.add_middleware( - CORSMiddleware, - allow_origins=origins, - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - -@app.head("/") -@app.get("/") -async def index(): - return "testing" - - -class TTSRequest(BaseModel): - modelIdx: int - speakerIdx: int - text: str - language: str = None - speed: float = 1 - is_symbol: bool = False - -@app.post("/speak") -async def ttspeak(req: 
TTSRequest): - print('/speak') - (name, description, speakers, lang, examples, - symbols, tts_fn, symbol_fn) = models_tts[req.modelIdx] - print('model is', name) - speaker = speakers[req.speakerIdx] - print('speaker is', speaker) - (s, data) = tts_fn(req.text, speaker, req.language, req.speed, req.is_symbol) - - audio_rate = data[0] - audio_narray = to_16bit_audio(data[1]) - - audio_file = AudioSegment( - audio_narray.tobytes(), - frame_rate=audio_rate, - sample_width=2, - channels=1 - ) - - print('duration:', audio_file.duration_seconds) - - audio_file.export("outputs/sample.wav", format="wav") - - return FileResponse("outputs/sample.wav") - - - - return req - -@app.get("/models") -async def models(): - return models_info - -@app.get("/sample") -async def sample(): - print('tts_fn') - (name, description, speakers, lang, examples, - symbols, tts_fn, symbol_fn) = models_tts[0] - print(name) - print(description) - print(speakers) - print(lang) - print(examples) - print('--------------') - print(examples[2]) - (s, data) = tts_fn(*examples[2]) - print(s) - print(data[0]) - - audio_rate = data[0] - audio_narray = to_16bit_audio(data[1]) - - audio_file = AudioSegment( - audio_narray.tobytes(), - frame_rate=audio_rate, - sample_width=2, - channels=1 - ) - - audio_file.export("sample.wav", format="wav") - - return FileResponse("sample.wav") - - #return File("sample.wav", filename="sample.wav") - - #print('tts_fn result') - #print(out[0]) - #print(out[1][0]) - - #return Response(content=audio_narray.tobytes()) diff --git a/spaces/augmentedimaginationhackathon/paperstocode/fronty/src/app/gym-setup/upload.component.html b/spaces/augmentedimaginationhackathon/paperstocode/fronty/src/app/gym-setup/upload.component.html deleted file mode 100644 index b3ceb3f3df2b2636c7f05239ab40bee2689caf53..0000000000000000000000000000000000000000 --- a/spaces/augmentedimaginationhackathon/paperstocode/fronty/src/app/gym-setup/upload.component.html +++ /dev/null @@ -1,16 +0,0 @@ -
-<!-- Template markup was stripped during extraction. The deleted 16-line
-     component rendered a heading, "Paper to Code", and the caption
-     "We will generate the code for this paper"; the rest of the markup
-     (presumably the file-upload controls of this upload component) is not
-     recoverable. -->
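For reference, the /speak route from spaces/arxnov/anotest/api.py above can be exercised with a short client. The sketch below is illustrative only: the host/port and the model and speaker indices are assumptions (index 0 would address the first entry of models_info and the first speaker registered for it), and the response body is the WAV file served via FileResponse.

import requests

payload = {
    "modelIdx": 0,          # assumed: the "Trilingual" entry in models_info
    "speakerIdx": 0,        # assumed: first speaker registered for that model
    "text": "[EN]Hello![EN]",
    "language": "Mix",
    "speed": 1.0,
    "is_symbol": False,
}

resp = requests.post("http://localhost:8000/speak", json=payload)  # assumed host
resp.raise_for_status()
with open("speak_output.wav", "wb") as f:
    f.write(resp.content)   # bytes of the WAV returned by FileResponse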
diff --git a/spaces/avans06/whisper-webui-translate/src/download.py b/spaces/avans06/whisper-webui-translate/src/download.py deleted file mode 100644 index f9a616977d9a7642925a9218a4a82cf400036521..0000000000000000000000000000000000000000 --- a/spaces/avans06/whisper-webui-translate/src/download.py +++ /dev/null @@ -1,79 +0,0 @@ -from tempfile import mkdtemp -from typing import List -from yt_dlp import YoutubeDL - -import yt_dlp -from yt_dlp.postprocessor import PostProcessor - -class FilenameCollectorPP(PostProcessor): - def __init__(self): - super(FilenameCollectorPP, self).__init__(None) - self.filenames = [] - - def run(self, information): - self.filenames.append(information["filepath"]) - return [], information - -def download_url(url: str, maxDuration: int = None, destinationDirectory: str = None, playlistItems: str = "1") -> List[str]: - try: - return _perform_download(url, maxDuration=maxDuration, outputTemplate=None, destinationDirectory=destinationDirectory, playlistItems=playlistItems) - except yt_dlp.utils.DownloadError as e: - # In case of an OS error, try again with a different output template - if e.msg and e.msg.find("[Errno 36] File name too long") >= 0: - return _perform_download(url, maxDuration=maxDuration, outputTemplate="%(title).10s %(id)s.%(ext)s") - pass - -def _perform_download(url: str, maxDuration: int = None, outputTemplate: str = None, destinationDirectory: str = None, playlistItems: str = "1", onlyAudio: bool = False): - # Create a temporary directory to store the downloaded files - if destinationDirectory is None: - destinationDirectory = mkdtemp() - - ydl_opts = { - "format": "bestaudio/best" if onlyAudio else "bestvideo[ext=mp4][vcodec^=avc1]+bestaudio[ext=m4a]/best", - 'paths': { - 'home': destinationDirectory - }, - "ignoreerrors": True - } - if (playlistItems): - ydl_opts['playlist_items'] = playlistItems - - # Add output template if specified - if outputTemplate: - ydl_opts['outtmpl'] = outputTemplate - - filename_collector = FilenameCollectorPP() - - with YoutubeDL(ydl_opts) as ydl: - if maxDuration and maxDuration > 0: - info = ydl.extract_info(url, download=False) - entries = "entries" in info and info["entries"] or [info] - - total_duration = 0 - - # Compute total duration - for entry in entries: - total_duration += float(entry["duration"]) - - if total_duration >= maxDuration: - raise ExceededMaximumDuration(videoDuration=total_duration, maxDuration=maxDuration, message="Video is too long") - - ydl.add_post_processor(filename_collector) - ydl.download([url]) - - if len(filename_collector.filenames) <= 0: - raise Exception("Cannot download " + url) - - result = [] - - for filename in filename_collector.filenames: - result.append(filename) - print("Downloaded " + filename) - - return result - -class ExceededMaximumDuration(Exception): - def __init__(self, videoDuration, maxDuration, message): - self.videoDuration = videoDuration - self.maxDuration = maxDuration - super().__init__(message) \ No newline at end of file diff --git a/spaces/avivdm1/AutoGPT/autogpt/app.py b/spaces/avivdm1/AutoGPT/autogpt/app.py deleted file mode 100644 index 58d9f7164ddfbb5019b072d789dc2fa6205dc9d3..0000000000000000000000000000000000000000 --- a/spaces/avivdm1/AutoGPT/autogpt/app.py +++ /dev/null @@ -1,330 +0,0 @@ -""" Command and Control """ -import json -from typing import Dict, List, NoReturn, Union - -from autogpt.agent.agent_manager import AgentManager -from autogpt.commands.analyze_code import analyze_code -from autogpt.commands.audio_text import 
read_audio_from_file -from autogpt.commands.execute_code import ( - execute_python_file, - execute_shell, - execute_shell_popen, -) -from autogpt.commands.file_operations import ( - append_to_file, - delete_file, - download_file, - read_file, - search_files, - write_to_file, -) -from autogpt.commands.git_operations import clone_repository -from autogpt.commands.google_search import google_official_search, google_search -from autogpt.commands.image_gen import generate_image -from autogpt.commands.improve_code import improve_code -from autogpt.commands.twitter import send_tweet -from autogpt.commands.web_requests import scrape_links, scrape_text -from autogpt.commands.web_selenium import browse_website -from autogpt.commands.write_tests import write_tests -from autogpt.config import Config -from autogpt.json_utils.json_fix_llm import fix_and_parse_json -from autogpt.memory import get_memory -from autogpt.processing.text import summarize_text -from autogpt.speech import say_text - -CFG = Config() -AGENT_MANAGER = AgentManager() - - -def is_valid_int(value: str) -> bool: - """Check if the value is a valid integer - - Args: - value (str): The value to check - - Returns: - bool: True if the value is a valid integer, False otherwise - """ - try: - int(value) - return True - except ValueError: - return False - - -def get_command(response_json: Dict): - """Parse the response and return the command name and arguments - - Args: - response_json (json): The response from the AI - - Returns: - tuple: The command name and arguments - - Raises: - json.decoder.JSONDecodeError: If the response is not valid JSON - - Exception: If any other error occurs - """ - try: - if "command" not in response_json: - return "Error:", "Missing 'command' object in JSON" - - if not isinstance(response_json, dict): - return "Error:", f"'response_json' object is not dictionary {response_json}" - - command = response_json["command"] - if not isinstance(command, dict): - return "Error:", "'command' object is not a dictionary" - - if "name" not in command: - return "Error:", "Missing 'name' field in 'command' object" - - command_name = command["name"] - - # Use an empty dictionary if 'args' field is not present in 'command' object - arguments = command.get("args", {}) - - return command_name, arguments - except json.decoder.JSONDecodeError: - return "Error:", "Invalid JSON" - # All other errors, return "Error: + error message" - except Exception as e: - return "Error:", str(e) - - -def map_command_synonyms(command_name: str): - """Takes the original command name given by the AI, and checks if the - string matches a list of common/known hallucinations - """ - synonyms = [ - ("write_file", "write_to_file"), - ("create_file", "write_to_file"), - ("search", "google"), - ] - for seen_command, actual_command_name in synonyms: - if command_name == seen_command: - return actual_command_name - return command_name - - -def execute_command(command_name: str, arguments): - """Execute the command and return the result - - Args: - command_name (str): The name of the command to execute - arguments (dict): The arguments for the command - - Returns: - str: The result of the command - """ - try: - command_name = map_command_synonyms(command_name.lower()) - if command_name == "google": - # Check if the Google API key is set and use the official search method - # If the API key is not set or has only whitespaces, use the unofficial - # search method - key = CFG.google_api_key - if key and key.strip() and key != "your-google-api-key": - google_result 
= google_official_search(arguments["input"]) - return google_result - else: - google_result = google_search(arguments["input"]) - - # google_result can be a list or a string depending on the search results - if isinstance(google_result, list): - safe_message = [ - google_result_single.encode("utf-8", "ignore") - for google_result_single in google_result - ] - else: - safe_message = google_result.encode("utf-8", "ignore") - - return safe_message.decode("utf-8") - elif command_name == "memory_add": - memory = get_memory(CFG) - return memory.add(arguments["string"]) - elif command_name == "start_agent": - return start_agent( - arguments["name"], arguments["task"], arguments["prompt"] - ) - elif command_name == "message_agent": - return message_agent(arguments["key"], arguments["message"]) - elif command_name == "list_agents": - return list_agents() - elif command_name == "delete_agent": - return delete_agent(arguments["key"]) - elif command_name == "get_text_summary": - return get_text_summary(arguments["url"], arguments["question"]) - elif command_name == "get_hyperlinks": - return get_hyperlinks(arguments["url"]) - elif command_name == "clone_repository": - return clone_repository( - arguments["repository_url"], arguments["clone_path"] - ) - elif command_name == "read_file": - return read_file(arguments["file"]) - elif command_name == "write_to_file": - return write_to_file(arguments["file"], arguments["text"]) - elif command_name == "append_to_file": - return append_to_file(arguments["file"], arguments["text"]) - elif command_name == "delete_file": - return delete_file(arguments["file"]) - elif command_name == "search_files": - return search_files(arguments["directory"]) - elif command_name == "download_file": - if not CFG.allow_downloads: - return "Error: You do not have user authorization to download files locally." - return download_file(arguments["url"], arguments["file"]) - elif command_name == "browse_website": - return browse_website(arguments["url"], arguments["question"]) - # TODO: Change these to take in a file rather than pasted code, if - # non-file is given, return instructions "Input should be a python - # filepath, write your code to file and try again" - elif command_name == "analyze_code": - return analyze_code(arguments["code"]) - elif command_name == "improve_code": - return improve_code(arguments["suggestions"], arguments["code"]) - elif command_name == "write_tests": - return write_tests(arguments["code"], arguments.get("focus")) - elif command_name == "execute_python_file": # Add this command - return execute_python_file(arguments["file"]) - elif command_name == "execute_shell": - if CFG.execute_local_commands: - return execute_shell(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." - ) - elif command_name == "execute_shell_popen": - if CFG.execute_local_commands: - return execute_shell_popen(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." 
- ) - elif command_name == "read_audio_from_file": - return read_audio_from_file(arguments["file"]) - elif command_name == "generate_image": - return generate_image(arguments["prompt"]) - elif command_name == "send_tweet": - return send_tweet(arguments["text"]) - elif command_name == "do_nothing": - return "No action performed." - elif command_name == "task_complete": - shutdown() - else: - return ( - f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" - " list for available commands and only respond in the specified JSON" - " format." - ) - except Exception as e: - return f"Error: {str(e)}" - - -def get_text_summary(url: str, question: str) -> str: - """Return the results of a Google search - - Args: - url (str): The url to scrape - question (str): The question to summarize the text for - - Returns: - str: The summary of the text - """ - text = scrape_text(url) - summary = summarize_text(url, text, question) - return f""" "Result" : {summary}""" - - -def get_hyperlinks(url: str) -> Union[str, List[str]]: - """Return the results of a Google search - - Args: - url (str): The url to scrape - - Returns: - str or list: The hyperlinks on the page - """ - return scrape_links(url) - - -def shutdown() -> NoReturn: - """Shut down the program""" - print("Shutting down...") - quit() - - -def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str: - """Start an agent with a given name, task, and prompt - - Args: - name (str): The name of the agent - task (str): The task of the agent - prompt (str): The prompt for the agent - model (str): The model to use for the agent - - Returns: - str: The response of the agent - """ - # Remove underscores from name - voice_name = name.replace("_", " ") - - first_message = f"""You are {name}. Respond with: "Acknowledged".""" - agent_intro = f"{voice_name} here, Reporting for duty!" - - # Create agent - if CFG.speak_mode: - say_text(agent_intro, 1) - key, ack = AGENT_MANAGER.create_agent(task, first_message, model) - - if CFG.speak_mode: - say_text(f"Hello {voice_name}. Your task is as follows. {task}.") - - # Assign task (prompt), get response - agent_response = AGENT_MANAGER.message_agent(key, prompt) - - return f"Agent {name} created with key {key}. First response: {agent_response}" - - -def message_agent(key: str, message: str) -> str: - """Message an agent with a given key and message""" - # Check if the key is a valid integer - if is_valid_int(key): - agent_response = AGENT_MANAGER.message_agent(int(key), message) - else: - return "Invalid key, must be an integer." - - # Speak response - if CFG.speak_mode: - say_text(agent_response, 1) - return agent_response - - -def list_agents(): - """List all agents - - Returns: - str: A list of all agents - """ - return "List of agents:\n" + "\n".join( - [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()] - ) - - -def delete_agent(key: str) -> str: - """Delete an agent with a given key - - Args: - key (str): The key of the agent to delete - - Returns: - str: A message indicating whether the agent was deleted or not - """ - result = AGENT_MANAGER.delete_agent(key) - return f"Agent {key} deleted." if result else f"Agent {key} does not exist." 
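As a reading aid for the command layer above, here is a minimal sketch of how its pieces compose. The response dict is a hypothetical example of the JSON shape the agent is expected to emit, not captured Auto-GPT output; note that execute_command also normalizes synonyms internally, so the explicit map_command_synonyms call only demonstrates the helper.

response_json = {
    "command": {
        "name": "write_file",   # a known synonym; normalized below
        "args": {"file": "notes.txt", "text": "hello"},
    }
}

command_name, arguments = get_command(response_json)
command_name = map_command_synonyms(command_name.lower())
assert command_name == "write_to_file"
result = execute_command(command_name, arguments)   # runs write_to_file("notes.txt", "hello")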
diff --git a/spaces/awacke1/ActingGameMechanicsForSocialIntelligence/ActorSim-backup-app.py b/spaces/awacke1/ActingGameMechanicsForSocialIntelligence/ActorSim-backup-app.py deleted file mode 100644 index be0d8956c74ed2ff0fc82274c5f4bb3dddc8a3a6..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ActingGameMechanicsForSocialIntelligence/ActorSim-backup-app.py +++ /dev/null @@ -1,120 +0,0 @@ -import streamlit as st -import random -import pandas as pd -from datetime import datetime - -# Define the game mechanics - -def generate_scenario(): - scenarios = ['🦸 You are a superhero saving the world from a meteorite', - '🏴‍☠️ You are a pirate searching for treasure on a deserted island', - '👨‍🍳 You are a chef trying to win a cooking competition', - '🕵️ You are a detective solving a murder case'] - return random.choice(scenarios) - -def calculate_score(slider_values): - bluffing_score, deduction_score, humor_score, memory_score, roleplay_score = slider_values - - total_score = bluffing_score + deduction_score + humor_score + memory_score + roleplay_score - return total_score - -def play_game(slider_values): - scenario = generate_scenario() - st.write('🎭 Act out the following scenario: ' + scenario) - total_score = calculate_score(slider_values) - st.write('🎯 Your total score is: ' + str(total_score)) - - # Save game history to a dataframe - game_history_df = pd.DataFrame({'Scenario': [scenario], - 'Bluffing': [slider_values[0]], - 'Deduction': [slider_values[1]], - 'Humor': [slider_values[2]], - 'Memory': [slider_values[3]], - 'Roleplay': [slider_values[4]], - 'Total Score': [total_score]}) - - # Append to existing game history - try: - existing_game_history = pd.read_csv('game_history.csv') - game_history_df = pd.concat([existing_game_history, game_history_df], ignore_index=True) - except: - pass - - return game_history_df - -def save_game_history(game_history_df): - game_history_df.to_csv('game_history.csv', index=False) - st.write('📝 Game history saved!') - st.write(game_history_df) - -def save_simulation_results(simulation_results_df): - filename = datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.csv' - simulation_results_df.to_csv(filename, index=False) - st.write('📝 Simulation results saved!') - st.write(simulation_results_df) - -def run_simulations(num_simulations): - total_scores = [] - simulation_results_df = pd.DataFrame(columns=['Scenario', 'Bluffing', 'Deduction', 'Humor', 'Memory', 'Roleplay', 'Total Score']) - for i in range(num_simulations): - slider_values = [random.randint(1, 10) for i in range(5)] - total_score = calculate_score(slider_values) - total_scores.append(total_score) - scenario = generate_scenario() - simulation_results_df = simulation_results_df.append({'Scenario': scenario, - 'Bluffing': slider_values[0], - 'Deduction': slider_values[1], - 'Humor': slider_values[2], - 'Memory': slider_values[3], - 'Roleplay': slider_values[4], - 'Total Score': total_score}, ignore_index=True) - st.write('🎲 Average score from ' + str(num_simulations) + ' simulations: ' + str(sum(total_scores)/len(total_scores))) - st.write(simulation_results_df) - save_simulation_results(simulation_results_df) - - -# Define the Streamlit app - -st.title('🎭 Acting Game Mechanics') -st.write('🎯 Welcome to the Acting Game Mechanics! This game measures your ability to bluff, deduce, use humor, remember details, and role-play. 
Drag the sliders to the left or right to adjust each skill, and click 🎭 Play to act out a scenario and receive a score.') - -slider_values = [st.slider('🃏 Bluffing', 1, 10, 5), -st.slider('🕵️ Deduction', 1, 10, 5), -st.slider('😂 Humor', 1, 10, 5), -st.slider('🧠 Memory', 1, 10, 5), -st.slider('👥 Roleplay', 1, 10, 5)] - -if st.button('🎭 Play'): - game_history_df = play_game(slider_values) - save_game_history(game_history_df) - -if st.button('🎲 Run simulations'): - num_simulations = st.slider('🔁 Number of simulations', 1, 100000, 1000) - run_simulations(num_simulations) - -if st.button('📝 Show all game history'): - try: - game_history_df = pd.read_csv('game_history.csv') - st.write(game_history_df) - except: - st.write('No game history found') - -if st.button('📝 Download game history'): - try: - game_history_df = pd.read_csv('game_history.csv') - filename = 'game_history_' + datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.csv' - game_history_df.to_csv(filename, index=False) - st.write('📝 Game history downloaded!') - st.write(game_history_df) - except: - st.write('No game history found') - -if st.button('📝 Download simulation results'): - try: - simulation_results_df = pd.read_csv('simulation_results.csv') - filename = 'simulation_results_' + datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.csv' - simulation_results_df.to_csv(filename, index=False) - st.write('📝 Simulation results downloaded!') - st.write(simulation_results_df) - except: - st.write('No simulation results found') diff --git a/spaces/awacke1/AframeHTML5Demo/style.css b/spaces/awacke1/AframeHTML5Demo/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AframeHTML5Demo/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/awinml/alpaca-cpp/app.py b/spaces/awinml/alpaca-cpp/app.py deleted file mode 100644 index 6614bd3a0eae9aebe2bc6571a854b641f8acae27..0000000000000000000000000000000000000000 --- a/spaces/awinml/alpaca-cpp/app.py +++ /dev/null @@ -1,14 +0,0 @@ -import gradio as gr -from llama_cpp import Llama - -llm = Llama(model_path="ggml-alpaca-7b-q4.bin", n_ctx=256, n_batch=128) - -def generate_text(input_text): - output = llm(f"{input_text}", max_tokens=128, stop=["Q:", "\n", "#"], echo=False) - return output['choices'][0]['text'] - -input_text = gr.inputs.Textbox(lines= 10, label="Enter your input text") -output_text = gr.outputs.Textbox(label="Output text") - -gr.Interface(fn=generate_text, inputs=input_text, outputs=output_text, title="Alpaca GGML").launch() - diff --git a/spaces/banana-projects/web3d/node_modules/three/src/materials/MeshDistanceMaterial.js b/spaces/banana-projects/web3d/node_modules/three/src/materials/MeshDistanceMaterial.js deleted file mode 100644 index 2fa54c5e77d3649e197658f7255d3bc4ffaa78eb..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/materials/MeshDistanceMaterial.js +++ /dev/null @@ -1,84 +0,0 @@ -import { Material } from './Material.js'; -import { Vector3 } from '../math/Vector3.js'; - -/** - * @author WestLangley / 
http://github.com/WestLangley - * - * parameters = { - * - * referencePosition: , - * nearDistance: , - * farDistance: , - * - * skinning: , - * morphTargets: , - * - * map: new THREE.Texture( ), - * - * alphaMap: new THREE.Texture( ), - * - * displacementMap: new THREE.Texture( ), - * displacementScale: , - * displacementBias: - * - * } - */ - -function MeshDistanceMaterial( parameters ) { - - Material.call( this ); - - this.type = 'MeshDistanceMaterial'; - - this.referencePosition = new Vector3(); - this.nearDistance = 1; - this.farDistance = 1000; - - this.skinning = false; - this.morphTargets = false; - - this.map = null; - - this.alphaMap = null; - - this.displacementMap = null; - this.displacementScale = 1; - this.displacementBias = 0; - - this.fog = false; - this.lights = false; - - this.setValues( parameters ); - -} - -MeshDistanceMaterial.prototype = Object.create( Material.prototype ); -MeshDistanceMaterial.prototype.constructor = MeshDistanceMaterial; - -MeshDistanceMaterial.prototype.isMeshDistanceMaterial = true; - -MeshDistanceMaterial.prototype.copy = function ( source ) { - - Material.prototype.copy.call( this, source ); - - this.referencePosition.copy( source.referencePosition ); - this.nearDistance = source.nearDistance; - this.farDistance = source.farDistance; - - this.skinning = source.skinning; - this.morphTargets = source.morphTargets; - - this.map = source.map; - - this.alphaMap = source.alphaMap; - - this.displacementMap = source.displacementMap; - this.displacementScale = source.displacementScale; - this.displacementBias = source.displacementBias; - - return this; - -}; - - -export { MeshDistanceMaterial }; diff --git a/spaces/bankholdup/stylegan_petbreeder/e4e/configs/__init__.py b/spaces/bankholdup/stylegan_petbreeder/e4e/configs/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/bcg-unet/demo/bcgunet/bcgunet.py b/spaces/bcg-unet/demo/bcgunet/bcgunet.py deleted file mode 100644 index 4c07ca28ff48707b3805ae89e1aa9c6211ec5f6b..0000000000000000000000000000000000000000 --- a/spaces/bcg-unet/demo/bcgunet/bcgunet.py +++ /dev/null @@ -1,148 +0,0 @@ -from os.path import * -import numpy as np -import random -import torch -import torch.nn as nn -import time -import tqdm -from scipy.signal import butter, sosfilt -from .unet import UNet1d - - -def butter_bandpass_filter(data, lowcut, highcut, fs, order=5): - nyq = 0.5 * fs - low = lowcut / nyq - high = highcut / nyq - sos = butter(order, [low, high], analog=False, btype="band", output="sos") - y = sosfilt(sos, data) - return y - - -def norm(ecg): - min1, max1 = np.percentile(ecg, [1, 99]) - ecg[ecg > max1] = max1 - ecg[ecg < min1] = min1 - ecg = (ecg - min1) / (max1 - min1) - return ecg - - -def run( - input_eeg, - input_ecg=None, - sfreq=5000, - iter_num=5000, - winsize_sec=2, - lr=1e-3, - onecycle=True, -): - window = winsize_sec * sfreq - eeg_raw = input_eeg - eeg_channel = eeg_raw.shape[0] - - eeg_filtered = eeg_raw * 0 - t = time.time() - for ii in range(eeg_channel): - eeg_filtered[ii, ...] 
= butter_bandpass_filter( - eeg_raw[ii, :], 0.5, sfreq * 0.4, sfreq - ) - - baseline = eeg_raw - eeg_filtered - - if input_ecg is None: - from sklearn.decomposition import PCA - - pca = PCA(n_components=1) - ecg = norm(pca.fit_transform(eeg_filtered.T)[:, 0].flatten()) - else: - ecg = norm(input_ecg.flatten()) - - torch.cuda.empty_cache() - device = "cuda" if torch.cuda.is_available() else "cpu" - NET = UNet1d(n_channels=1, n_classes=eeg_channel, nfilter=8).to(device) - optimizer = torch.optim.Adam(NET.parameters(), lr=lr) - optimizer.zero_grad() - maxlen = ecg.size - if onecycle: - scheduler = torch.optim.lr_scheduler.OneCycleLR( - optimizer, lr, total_steps=iter_num - ) - else: - # constant learning rate - scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=1) - - loss_list = [] - - # randomly get windows in ECG signal - - index_all = (np.random.random_sample(iter_num) * (maxlen - window)).astype(int) - - pbar = tqdm.tqdm(index_all) - count = 0 - for index in pbar: - count += 1 - ECG = ecg[index : (index + window)] - EEG = eeg_filtered[:, index : (index + window)] - ECG_d = torch.from_numpy(ECG[None, ...][None, ...]).to(device).float() - EEG_d = torch.from_numpy(EEG[None, ...]).to(device).float() - - # step 3: forward path of UNET - logits = NET(ECG_d) - loss = nn.MSELoss()(logits, EEG_d) - loss_list.append(loss.item()) - - # Step 5: Perform back-propagation - loss.backward() # accumulate the gradients - optimizer.step() # Update network weights according to the optimizer - optimizer.zero_grad() # empty the gradients - scheduler.step() - - if count % 50 == 0: - pbar.set_description( - f"Loss {np.mean(loss_list):.3f}, lr: {optimizer.param_groups[0]['lr']:.5f}" - ) - loss_list = [] - - EEG = eeg_filtered - # ECG = norm(butter_bandpass_filter(data['ECG'], 0.5, 20, sfreq)) - ECG = ecg - ECG_d = torch.from_numpy(ECG[None, ...][None, ...]).to(device).float() - EEG_d = torch.from_numpy(EEG[None, ...]).to(device).float() - with torch.no_grad(): - logits = NET(ECG_d) - BCG_pred = logits.cpu().detach().numpy()[0, ...] - - neweeg = EEG - BCG_pred + baseline - - return neweeg - - -def morlet_psd(signal, sample_rate=5000, freq=10, wavelet="morl"): - import pywt - - # Define the wavelet and scales to be used - - scales = np.arange(sample_rate) - freqs = pywt.scale2frequency("morl", scales) * sample_rate - indx = np.argmin(abs(freqs - freq)) - - scale = scales[indx] - - # scale = pywt.frequency2scale('morl', freq/sample_rate) - - # Calculate the wavelet coefficients - coeffs, freq = pywt.cwt(signal, scale, wavelet, 1 / sample_rate) - # Calculate the power (magnitude squared) of the coefficients - power = np.abs(coeffs) ** 2 - - # Average the power across time to get the power spectral density - psd = np.mean(power, axis=1) - - return psd - - -def get_psd(eeg, sfreq=5000, freq=10): - psd = [] - for ii in tqdm.tqdm(range(eeg.shape[0])): - psd.append(morlet_psd(eeg[ii], sample_rate=sfreq, freq=freq)) - - return np.array(psd) diff --git a/spaces/betterme/Nice/pages/chatbase.py b/spaces/betterme/Nice/pages/chatbase.py deleted file mode 100644 index fe9fef68df2b26e0e03249749bd61555125e1dd5..0000000000000000000000000000000000000000 --- a/spaces/betterme/Nice/pages/chatbase.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Project : AI. 
@by PyCharm -# @File : chatpdf -# @Time : 2023/4/25 17:01 -# @Author : betterme -# @WeChat : meutils -# @Software : PyCharm -# @Description : - -from meutils.pipe import * -from chatllm.applications import ChatBase -from chatllm.utils import load_llm4chat - -import streamlit as st -from appzoo.streamlit_app.utils import display_pdf, reply4input - -st.set_page_config('🔥ChatLLM', layout='centered', initial_sidebar_state='collapsed') - - -@st.cache_resource -def get_chat_func(): - chat_func = load_llm4chat("THUDM/chatglm-6b-int4") - return chat_func - - -chat_func = get_chat_func() - -qa = ChatBase(chat_func=chat_func) - - -def reply_func(query): - for response, _ in qa(query=query): - yield response - - -# def reply_func(x): -# for i in range(10): -# time.sleep(1) -# x += str(i) -# yield x - - -container = st.container() # 占位符 -text = st.text_area(label="用户输入", height=100, placeholder="请在这儿输入您的问题") -# knowledge_base = st.sidebar.text_area(label="知识库", height=100, placeholder="请在这儿输入您的问题") - -if st.button("发送", key="predict"): - with st.spinner("AI正在思考,请稍等........"): - history = st.session_state.get('state') - st.session_state["state"] = reply4input(text, history, container=container, reply_func=reply_func) diff --git "a/spaces/betterme/Nice/pages/\345\274\271\347\252\227.py" "b/spaces/betterme/Nice/pages/\345\274\271\347\252\227.py" deleted file mode 100644 index 7e92a529b06f2c3f9c51d2382b505b30f2ed4e5f..0000000000000000000000000000000000000000 --- "a/spaces/betterme/Nice/pages/\345\274\271\347\252\227.py" +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Project : AI. @by PyCharm -# @File : 弹窗 -# @Time : 2023/4/2 12:35 -# @Author : betterme -# @WeChat : meutils -# @Software : PyCharm -# @Description : - -from meutils.pipe import * -import streamlit as st -from streamlit_modal import Modal - -import streamlit.components.v1 as components - -modal = Modal("Demo Modal", key='k') -open_modal = st.button("Open") -if open_modal: - modal.open() - -if modal.is_open(): - with modal.container(): - st.write("Text goes here") - - html_string = ''' -

-    <h1>HTML string in RED</h1>

- - - ''' - components.html(html_string) - - st.write("Some fancy text") - value = st.checkbox("Check me") - st.write(f"Checkbox checked: {value}") - -# -# from transformers import AutoModelForQuestionAnswering,AutoTokenizer,pipeline -# model = AutoModelForQuestionAnswering.from_pretrained('uer/roberta-base-chinese-extractive-qa') -# tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-chinese-extractive-qa') -# QA = pipeline('question-answering', model=model, tokenizer=tokenizer) -# QA_input = {'question': "著名诗歌《假如生活欺骗了你》的作者是",'context': "普希金从那里学习人民的语言,吸取了许多有益的养料,这一切对普希金后来的创作产生了很大的影响。这两年里,普希金创作了不少优秀的作品,如《囚徒》、《致大海》、《致凯恩》和《假如生活欺骗了你》等几十首抒情诗,叙事诗《努林伯爵》,历史剧《鲍里斯·戈都诺夫》,以及《叶甫盖尼·奥涅金》前六章。"} -# QA(QA_input) diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/modelloader.py b/spaces/bigjoker/stable-diffusion-webui/modules/modelloader.py deleted file mode 100644 index fc3f6249f1ccb53c279f3e86d3ea95a4a7d03e50..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/modelloader.py +++ /dev/null @@ -1,172 +0,0 @@ -import glob -import os -import shutil -import importlib -from urllib.parse import urlparse - -from basicsr.utils.download_util import load_file_from_url -from modules import shared -from modules.upscaler import Upscaler -from modules.paths import script_path, models_path - - -def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list: - """ - A one-and done loader to try finding the desired models in specified directories. - - @param download_name: Specify to download from model_url immediately. - @param model_url: If no other models are found, this will be downloaded on upscale. - @param model_path: The location to store/find models in. - @param command_path: A command-line argument to search for models in first. 
- @param ext_filter: An optional list of filename extensions to filter by - @return: A list of paths containing the desired model(s) - """ - output = [] - - if ext_filter is None: - ext_filter = [] - - try: - places = [] - - if command_path is not None and command_path != model_path: - pretrained_path = os.path.join(command_path, 'experiments/pretrained_models') - if os.path.exists(pretrained_path): - print(f"Appending path: {pretrained_path}") - places.append(pretrained_path) - elif os.path.exists(command_path): - places.append(command_path) - - places.append(model_path) - - for place in places: - if os.path.exists(place): - for file in glob.iglob(place + '**/**', recursive=True): - full_path = file - if os.path.isdir(full_path): - continue - if os.path.islink(full_path) and not os.path.exists(full_path): - print(f"Skipping broken symlink: {full_path}") - continue - if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]): - continue - if len(ext_filter) != 0: - model_name, extension = os.path.splitext(file) - if extension not in ext_filter: - continue - if file not in output: - output.append(full_path) - - if model_url is not None and len(output) == 0: - if download_name is not None: - dl = load_file_from_url(model_url, model_path, True, download_name) - output.append(dl) - else: - output.append(model_url) - - except Exception: - pass - - return output - - -def friendly_name(file: str): - if "http" in file: - file = urlparse(file).path - - file = os.path.basename(file) - model_name, extension = os.path.splitext(file) - return model_name - - -def cleanup_models(): - # This code could probably be more efficient if we used a tuple list or something to store the src/destinations - # and then enumerate that, but this works for now. In the future, it'd be nice to just have every "model" scaler - # somehow auto-register and just do these things... 
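-    # The block below migrates legacy model locations into the models/ tree:
-    # *.ckpt / *.safetensors checkpoints are moved into models/Stable-diffusion,
-    # BSRGAN weights are merged into models/ESRGAN, and the ESRGAN / GFPGAN /
-    # SwinIR / LDSR folders are moved into their matching models/<name> folders.
-    # move_files() is a no-op whenever the source folder does not exist.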
- root_path = script_path - src_path = models_path - dest_path = os.path.join(models_path, "Stable-diffusion") - move_files(src_path, dest_path, ".ckpt") - move_files(src_path, dest_path, ".safetensors") - src_path = os.path.join(root_path, "ESRGAN") - dest_path = os.path.join(models_path, "ESRGAN") - move_files(src_path, dest_path) - src_path = os.path.join(models_path, "BSRGAN") - dest_path = os.path.join(models_path, "ESRGAN") - move_files(src_path, dest_path, ".pth") - src_path = os.path.join(root_path, "gfpgan") - dest_path = os.path.join(models_path, "GFPGAN") - move_files(src_path, dest_path) - src_path = os.path.join(root_path, "SwinIR") - dest_path = os.path.join(models_path, "SwinIR") - move_files(src_path, dest_path) - src_path = os.path.join(root_path, "repositories/latent-diffusion/experiments/pretrained_models/") - dest_path = os.path.join(models_path, "LDSR") - move_files(src_path, dest_path) - - -def move_files(src_path: str, dest_path: str, ext_filter: str = None): - try: - if not os.path.exists(dest_path): - os.makedirs(dest_path) - if os.path.exists(src_path): - for file in os.listdir(src_path): - fullpath = os.path.join(src_path, file) - if os.path.isfile(fullpath): - if ext_filter is not None: - if ext_filter not in file: - continue - print(f"Moving {file} from {src_path} to {dest_path}.") - try: - shutil.move(fullpath, dest_path) - except: - pass - if len(os.listdir(src_path)) == 0: - print(f"Removing empty folder: {src_path}") - shutil.rmtree(src_path, True) - except: - pass - - -builtin_upscaler_classes = [] -forbidden_upscaler_classes = set() - - -def list_builtin_upscalers(): - load_upscalers() - - builtin_upscaler_classes.clear() - builtin_upscaler_classes.extend(Upscaler.__subclasses__()) - - -def forbid_loaded_nonbuiltin_upscalers(): - for cls in Upscaler.__subclasses__(): - if cls not in builtin_upscaler_classes: - forbidden_upscaler_classes.add(cls) - - -def load_upscalers(): - # We can only do this 'magic' method to dynamically load upscalers if they are referenced, - # so we'll try to import any _model.py files before looking in __subclasses__ - modules_dir = os.path.join(shared.script_path, "modules") - for file in os.listdir(modules_dir): - if "_model.py" in file: - model_name = file.replace("_model.py", "") - full_model = f"modules.{model_name}_model" - try: - importlib.import_module(full_model) - except: - pass - - datas = [] - commandline_options = vars(shared.cmd_opts) - for cls in Upscaler.__subclasses__(): - if cls in forbidden_upscaler_classes: - continue - - name = cls.__name__ - cmd_name = f"{name.lower().replace('upscaler', '')}_models_path" - scaler = cls(commandline_options.get(cmd_name, None)) - datas += scaler.scalers - - shared.sd_upscalers = datas diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/textual_inversion/learn_schedule.py b/spaces/bigjoker/stable-diffusion-webui/modules/textual_inversion/learn_schedule.py deleted file mode 100644 index f5156dd53e549570f53964781e564b7b92a1045a..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/textual_inversion/learn_schedule.py +++ /dev/null @@ -1,81 +0,0 @@ -import tqdm - - -class LearnScheduleIterator: - def __init__(self, learn_rate, max_steps, cur_step=0): - """ - specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000 - """ - - pairs = learn_rate.split(',') - self.rates = [] - self.it = 0 - self.maxit = 0 - try: - for i, pair in enumerate(pairs): - if not 
pair.strip(): - continue - tmp = pair.split(':') - if len(tmp) == 2: - step = int(tmp[1]) - if step > cur_step: - self.rates.append((float(tmp[0]), min(step, max_steps))) - self.maxit += 1 - if step > max_steps: - return - elif step == -1: - self.rates.append((float(tmp[0]), max_steps)) - self.maxit += 1 - return - else: - self.rates.append((float(tmp[0]), max_steps)) - self.maxit += 1 - return - assert self.rates - except (ValueError, AssertionError): - raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') - - - def __iter__(self): - return self - - def __next__(self): - if self.it < self.maxit: - self.it += 1 - return self.rates[self.it - 1] - else: - raise StopIteration - - -class LearnRateScheduler: - def __init__(self, learn_rate, max_steps, cur_step=0, verbose=True): - self.schedules = LearnScheduleIterator(learn_rate, max_steps, cur_step) - (self.learn_rate, self.end_step) = next(self.schedules) - self.verbose = verbose - - if self.verbose: - print(f'Training at rate of {self.learn_rate} until step {self.end_step}') - - self.finished = False - - def step(self, step_number): - if step_number < self.end_step: - return False - - try: - (self.learn_rate, self.end_step) = next(self.schedules) - except StopIteration: - self.finished = True - return False - return True - - def apply(self, optimizer, step_number): - if not self.step(step_number): - return - - if self.verbose: - tqdm.tqdm.write(f'Training at rate of {self.learn_rate} until step {self.end_step}') - - for pg in optimizer.param_groups: - pg['lr'] = self.learn_rate - diff --git a/spaces/bioriAsaeru/text-to-voice/Canon Lbp6030 6040 6018l Drivers Download Support for i-SENSYS LBP6030 Series Printers.md b/spaces/bioriAsaeru/text-to-voice/Canon Lbp6030 6040 6018l Drivers Download Support for i-SENSYS LBP6030 Series Printers.md deleted file mode 100644 index b84ddedcdc9e5fbbe7249032606d4457328ad065..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Canon Lbp6030 6040 6018l Drivers Download Support for i-SENSYS LBP6030 Series Printers.md +++ /dev/null @@ -1,26 +0,0 @@ -
\ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Game Masked Rider Kabuto Pc.md b/spaces/bioriAsaeru/text-to-voice/Game Masked Rider Kabuto Pc.md deleted file mode 100644 index 0b417a844e5983a907b7e90d27a32fbb763d279e..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Game Masked Rider Kabuto Pc.md +++ /dev/null @@ -1,6 +0,0 @@ -

diff --git a/spaces/bioriAsaeru/text-to-voice/HACK Artisteer v.4.1.0.60046 patch REPT-DeGun TPB 2013 How to Create Professional Web Designs.md b/spaces/bioriAsaeru/text-to-voice/HACK Artisteer v.4.1.0.60046 patch REPT-DeGun TPB 2013 How to Create Professional Web Designs.md deleted file mode 100644 index 2559b94082765549c7c74f0b0debc5dbb9652af4..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/HACK Artisteer v.4.1.0.60046 patch REPT-DeGun TPB 2013 How to Create Professional Web Designs.md +++ /dev/null @@ -1,6 +0,0 @@ -

diff --git a/spaces/bioriAsaeru/text-to-voice/Kisi Kisi Soal Seni Budaya SD Kelas 456 Semester 1 Inspirasi dan Motivasi untuk Mengembangkan Bakat dan Minat di Bidang Seni Budaya.md b/spaces/bioriAsaeru/text-to-voice/Kisi Kisi Soal Seni Budaya SD Kelas 456 Semester 1 Inspirasi dan Motivasi untuk Mengembangkan Bakat dan Minat di Bidang Seni Budaya.md deleted file mode 100644 index a835d0ef87e85ad34c7cc1d7834a2d15219b82c6..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Kisi Kisi Soal Seni Budaya SD Kelas 456 Semester 1 Inspirasi dan Motivasi untuk Mengembangkan Bakat dan Minat di Bidang Seni Budaya.md +++ /dev/null @@ -1,6 +0,0 @@ -

diff --git a/spaces/bradarrML/stablediffusion-infinity/perlin2d.py b/spaces/bradarrML/stablediffusion-infinity/perlin2d.py deleted file mode 100644 index 917c2c6511f5f1a75a284be9a9fef3248d82f2f9..0000000000000000000000000000000000000000 --- a/spaces/bradarrML/stablediffusion-infinity/perlin2d.py +++ /dev/null @@ -1,45 +0,0 @@ -import numpy as np - -########## -# https://stackoverflow.com/questions/42147776/producing-2d-perlin-noise-with-numpy/42154921#42154921 -def perlin(x, y, seed=0): - # permutation table - np.random.seed(seed) - p = np.arange(256, dtype=int) - np.random.shuffle(p) - p = np.stack([p, p]).flatten() - # coordinates of the top-left - xi, yi = x.astype(int), y.astype(int) - # internal coordinates - xf, yf = x - xi, y - yi - # fade factors - u, v = fade(xf), fade(yf) - # noise components - n00 = gradient(p[p[xi] + yi], xf, yf) - n01 = gradient(p[p[xi] + yi + 1], xf, yf - 1) - n11 = gradient(p[p[xi + 1] + yi + 1], xf - 1, yf - 1) - n10 = gradient(p[p[xi + 1] + yi], xf - 1, yf) - # combine noises - x1 = lerp(n00, n10, u) - x2 = lerp(n01, n11, u) # FIX1: I was using n10 instead of n01 - return lerp(x1, x2, v) # FIX2: I also had to reverse x1 and x2 here - - -def lerp(a, b, x): - "linear interpolation" - return a + x * (b - a) - - -def fade(t): - "6t^5 - 15t^4 + 10t^3" - return 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3 - - -def gradient(h, x, y): - "grad converts h to the right gradient vector and return the dot product with (x,y)" - vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]]) - g = vectors[h % 4] - return g[:, :, 0] * x + g[:, :, 1] * y - - -########## \ No newline at end of file diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/metrics/rvm.py b/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/metrics/rvm.py deleted file mode 100644 index 028324529531dd7ee97210dfd890fed717447be0..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/metrics/rvm.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp -import torch -from torch import nn -import torchaudio - - -def db_to_scale(volume: tp.Union[float, torch.Tensor]): - return 10 ** (volume / 20) - - -def scale_to_db(scale: torch.Tensor, min_volume: float = -120): - min_scale = db_to_scale(min_volume) - return 20 * torch.log10(scale.clamp(min=min_scale)) - - -class RelativeVolumeMel(nn.Module): - """Relative volume melspectrogram measure. - - Computes a measure of distance over two mel spectrogram that is interpretable in terms - of decibels. Given `x_ref` and `x_est` two waveforms of shape `[*, T]`, it will - first renormalize both by the ground truth of `x_ref`. - - Then it computes the mel spectrogram `z_ref` and `z_est` and compute volume of the difference - relative to the volume of `z_ref` for each time-frequency bin. It further adds some limits, e.g. - clamping the values between -25 and 25 dB (controlled by `min_relative_volume` and `max_relative_volume`) - with the goal of avoiding the loss being dominated by parts where the reference is almost silent. - Indeed, volumes in dB can take unbounded values both towards -oo and +oo, which can make the final - average metric harder to interpret. 
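-    As a worked example, if for some time-frequency bin the reference `z_ref` corresponds
-    to -10 dB and the error `|z_ref - z_est|` to -22 dB, that bin contributes
-    -22 - (-10) = -12 dB to the average (well within the default [-25, 25] clamping range).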
Besides, anything below -30 dB of attenuation would sound extremely - good (for a neural network output, although sound engineers typically aim for much lower attenuations). - Similarly, anything above +30 dB would just be completely missing the target, and there is no point - in measuring by exactly how much it missed it. -25, 25 is a more conservative range, but also more - in line with what neural nets currently can achieve. - - For instance, a Relative Volume Mel (RVM) score of -10 dB means that on average, the delta between - the target and reference mel-spec is 10 dB lower than the reference mel-spec value. - - The metric can be aggregated over a given frequency band in order have different insights for - different region of the spectrum. `num_aggregated_bands` controls the number of bands. - - ..Warning:: While this function is optimized for interpretability, nothing was done to ensure it - is numerically stable when computing its gradient. We thus advise against using it as a training loss. - - Args: - sample_rate (int): Sample rate of the input audio. - n_mels (int): Number of mel bands to use. - n_fft (int): Number of frequency bins for the STFT. - hop_length (int): Hop length of the STFT and the mel-spectrogram. - min_relative_volume (float): The error `z_ref - z_est` volume is given relative to - the volume of `z_ref`. If error is smaller than -25 dB of `z_ref`, then it is clamped. - max_relative_volume (float): Same as `min_relative_volume` but clamping if the error is larger than that. - max_initial_gain (float): When rescaling the audio at the very beginning, we will limit the gain - to that amount, to avoid rescaling near silence. Given in dB. - min_activity_volume (float): When computing the reference level from `z_ref`, will clamp low volume - bins to that amount. This is effectively our "zero" level for the reference mel-spectrogram, - and anything below that will be considered equally. - num_aggregated_bands (int): Number of bands to keep when computing the average RVM value. - For instance, a value of 3 would give 3 scores, roughly for low, mid and high freqs. - """ - def __init__(self, sample_rate: int = 24000, n_mels: int = 80, n_fft: int = 512, - hop_length: int = 128, min_relative_volume: float = -25, - max_relative_volume: float = 25, max_initial_gain: float = 25, - min_activity_volume: float = -25, - num_aggregated_bands: int = 4) -> None: - super().__init__() - self.melspec = torchaudio.transforms.MelSpectrogram( - n_mels=n_mels, n_fft=n_fft, hop_length=hop_length, - normalized=True, sample_rate=sample_rate, power=2) - self.min_relative_volume = min_relative_volume - self.max_relative_volume = max_relative_volume - self.max_initial_gain = max_initial_gain - self.min_activity_volume = min_activity_volume - self.num_aggregated_bands = num_aggregated_bands - - def forward(self, estimate: torch.Tensor, ground_truth: torch.Tensor) -> tp.Dict[str, torch.Tensor]: - """Compute RVM metric between estimate and reference samples. - - Args: - estimate (torch.Tensor): Estimate sample. - ground_truth (torch.Tensor): Reference sample. - - Returns: - dict[str, torch.Tensor]: Metrics with keys `rvm` for the overall average, and `rvm_{k}` - for the RVM over the k-th band (k=0..num_aggregated_bands - 1). 
- """ - min_scale = db_to_scale(-self.max_initial_gain) - std = ground_truth.pow(2).mean().sqrt().clamp(min=min_scale) - z_gt = self.melspec(ground_truth / std).sqrt() - z_est = self.melspec(estimate / std).sqrt() - - delta = z_gt - z_est - ref_db = scale_to_db(z_gt, self.min_activity_volume) - delta_db = scale_to_db(delta.abs(), min_volume=-120) - relative_db = (delta_db - ref_db).clamp(self.min_relative_volume, self.max_relative_volume) - dims = list(range(relative_db.dim())) - dims.remove(dims[-2]) - losses_per_band = relative_db.mean(dim=dims) - aggregated = [chunk.mean() for chunk in losses_per_band.chunk(self.num_aggregated_bands, dim=0)] - metrics = {f'rvm_{index}': value for index, value in enumerate(aggregated)} - metrics['rvm'] = losses_per_band.mean() - return metrics diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/structures/chart.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/structures/chart.py deleted file mode 100644 index 115cc084e98115c537382494af9eb0e246cd375b..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/structures/chart.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from dataclasses import dataclass -from typing import Union -import torch - - -@dataclass -class DensePoseChartPredictorOutput: - """ - Predictor output that contains segmentation and inner coordinates predictions for predefined - body parts: - * coarse segmentation, a tensor of shape [N, K, Hout, Wout] - * fine segmentation, a tensor of shape [N, C, Hout, Wout] - * U coordinates, a tensor of shape [N, C, Hout, Wout] - * V coordinates, a tensor of shape [N, C, Hout, Wout] - where - - N is the number of instances - - K is the number of coarse segmentation channels ( - 2 = foreground / background, - 15 = one of 14 body parts / background) - - C is the number of fine segmentation channels ( - 24 fine body parts / background) - - Hout and Wout are height and width of predictions - """ - - coarse_segm: torch.Tensor - fine_segm: torch.Tensor - u: torch.Tensor - v: torch.Tensor - - def __len__(self): - """ - Number of instances (N) in the output - """ - return self.coarse_segm.size(0) - - def __getitem__( - self, item: Union[int, slice, torch.BoolTensor] - ) -> "DensePoseChartPredictorOutput": - """ - Get outputs for the selected instance(s) - - Args: - item (int or slice or tensor): selected items - """ - if isinstance(item, int): - return DensePoseChartPredictorOutput( - coarse_segm=self.coarse_segm[item].unsqueeze(0), - fine_segm=self.fine_segm[item].unsqueeze(0), - u=self.u[item].unsqueeze(0), - v=self.v[item].unsqueeze(0), - ) - else: - return DensePoseChartPredictorOutput( - coarse_segm=self.coarse_segm[item], - fine_segm=self.fine_segm[item], - u=self.u[item], - v=self.v[item], - ) - - def to(self, device: torch.device): - """ - Transfers all tensors to the given device - """ - coarse_segm = self.coarse_segm.to(device) - fine_segm = self.fine_segm.to(device) - u = self.u.to(device) - v = self.v.to(device) - return DensePoseChartPredictorOutput(coarse_segm=coarse_segm, fine_segm=fine_segm, u=u, v=v) diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/TensorMask/tensormask/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/TensorMask/tensormask/__init__.py deleted file mode 100644 index eec7978ac3c5204b1e51dac03ba3d45efc5b379d..0000000000000000000000000000000000000000 --- 
a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/TensorMask/tensormask/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .config import add_tensormask_config -from .arch import TensorMask diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/TridentNet/tridentnet/trident_conv.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/TridentNet/tridentnet/trident_conv.py deleted file mode 100644 index 18d5b0b9d73f2da263e7e026a82c62231a88d279..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/TridentNet/tridentnet/trident_conv.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import torch -from torch import nn -from torch.nn import functional as F -from torch.nn.modules.utils import _pair - -from detectron2.layers.wrappers import _NewEmptyTensorOp - - -class TridentConv(nn.Module): - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - paddings=0, - dilations=1, - groups=1, - num_branch=1, - test_branch_idx=-1, - bias=False, - norm=None, - activation=None, - ): - super(TridentConv, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.num_branch = num_branch - self.stride = _pair(stride) - self.groups = groups - self.with_bias = bias - if isinstance(paddings, int): - paddings = [paddings] * self.num_branch - if isinstance(dilations, int): - dilations = [dilations] * self.num_branch - self.paddings = [_pair(padding) for padding in paddings] - self.dilations = [_pair(dilation) for dilation in dilations] - self.test_branch_idx = test_branch_idx - self.norm = norm - self.activation = activation - - assert len({self.num_branch, len(self.paddings), len(self.dilations)}) == 1 - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) - ) - if bias: - self.bias = nn.Parameter(torch.Tensor(out_channels)) - else: - self.bias = None - - nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") - if self.bias is not None: - nn.init.constant_(self.bias, 0) - - def forward(self, inputs): - num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1 - assert len(inputs) == num_branch - - if inputs[0].numel() == 0: - output_shape = [ - (i + 2 * p - (di * (k - 1) + 1)) // s + 1 - for i, p, di, k, s in zip( - inputs[0].shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride - ) - ] - output_shape = [input[0].shape[0], self.weight.shape[0]] + output_shape - return [_NewEmptyTensorOp.apply(input, output_shape) for input in inputs] - - if self.training or self.test_branch_idx == -1: - outputs = [ - F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation, self.groups) - for input, dilation, padding in zip(inputs, self.dilations, self.paddings) - ] - else: - outputs = [ - F.conv2d( - inputs[0], - self.weight, - self.bias, - self.stride, - self.paddings[self.test_branch_idx], - self.dilations[self.test_branch_idx], - self.groups, - ) - ] - - if self.norm is not None: - outputs = [self.norm(x) for x in outputs] - if self.activation is not None: - outputs = [self.activation(x) for x in outputs] - return outputs - - def extra_repr(self): - tmpstr = "in_channels=" + str(self.in_channels) - tmpstr += ", out_channels=" + str(self.out_channels) - tmpstr += ", kernel_size=" + str(self.kernel_size) - tmpstr += ", num_branch=" + str(self.num_branch) - tmpstr += ", test_branch_idx=" + 
str(self.test_branch_idx) - tmpstr += ", stride=" + str(self.stride) - tmpstr += ", paddings=" + str(self.paddings) - tmpstr += ", dilations=" + str(self.dilations) - tmpstr += ", groups=" + str(self.groups) - tmpstr += ", bias=" + str(self.with_bias) - return tmpstr diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/tests/layers/test_deformable.py b/spaces/brjathu/HMR2.0/vendor/detectron2/tests/layers/test_deformable.py deleted file mode 100644 index 4aa319fc7e614f6a7a8ece7a45c177211c03012d..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/tests/layers/test_deformable.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import numpy as np -import unittest -import torch - -from detectron2.layers import DeformConv, ModulatedDeformConv -from detectron2.utils.env import TORCH_VERSION - - -@unittest.skipIf( - TORCH_VERSION == (1, 8) and torch.cuda.is_available(), - "This test fails under cuda11 + torch1.8.", -) -class DeformableTest(unittest.TestCase): - @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") - def test_forward_output(self): - device = torch.device("cuda") - N, C, H, W = shape = 1, 1, 5, 5 - kernel_size = 3 - padding = 1 - - inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device) - """ - 0 1 2 3 4 - 5 6 7 8 9 - 10 11 12 13 14 - 15 16 17 18 19 - 20 21 22 23 24 - """ - offset_channels = kernel_size * kernel_size * 2 - offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device) - - # Test DCN v1 - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight)) - output = deform(inputs, offset) - output = output.detach().cpu().numpy() - deform_results = np.array( - [ - [30, 41.25, 48.75, 45, 28.75], - [62.25, 81, 90, 80.25, 50.25], - [99.75, 126, 135, 117.75, 72.75], - [105, 131.25, 138.75, 120, 73.75], - [71.75, 89.25, 93.75, 80.75, 49.5], - ] - ) - self.assertTrue(np.allclose(output.flatten(), deform_results.flatten())) - - # Test DCN v2 - mask_channels = kernel_size * kernel_size - mask = torch.full((N, mask_channels, H, W), 0.5, dtype=torch.float32).to(device) - modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to( - device - ) - modulate_deform.weight = deform.weight - output = modulate_deform(inputs, offset, mask) - output = output.detach().cpu().numpy() - self.assertTrue(np.allclose(output.flatten(), deform_results.flatten() * 0.5)) - - def test_forward_output_on_cpu(self): - device = torch.device("cpu") - N, C, H, W = shape = 1, 1, 5, 5 - kernel_size = 3 - padding = 1 - - inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device) - - offset_channels = kernel_size * kernel_size * 2 - offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device) - - # Test DCN v1 on cpu - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight)) - output = deform(inputs, offset) - output = output.detach().cpu().numpy() - deform_results = np.array( - [ - [30, 41.25, 48.75, 45, 28.75], - [62.25, 81, 90, 80.25, 50.25], - [99.75, 126, 135, 117.75, 72.75], - [105, 131.25, 138.75, 120, 73.75], - [71.75, 89.25, 93.75, 80.75, 49.5], - ] - ) - self.assertTrue(np.allclose(output.flatten(), deform_results.flatten())) - - @unittest.skipIf(not torch.cuda.is_available(), "This test requires gpu 
access") - def test_forward_output_on_cpu_equals_output_on_gpu(self): - N, C, H, W = shape = 2, 4, 10, 10 - kernel_size = 3 - padding = 1 - - for groups in [1, 2]: - inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape) - offset_channels = kernel_size * kernel_size * 2 - offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32) - - deform_gpu = DeformConv( - C, C, kernel_size=kernel_size, padding=padding, groups=groups - ).to("cuda") - deform_gpu.weight = torch.nn.Parameter(torch.ones_like(deform_gpu.weight)) - output_gpu = deform_gpu(inputs.to("cuda"), offset.to("cuda")).detach().cpu().numpy() - - deform_cpu = DeformConv( - C, C, kernel_size=kernel_size, padding=padding, groups=groups - ).to("cpu") - deform_cpu.weight = torch.nn.Parameter(torch.ones_like(deform_cpu.weight)) - output_cpu = deform_cpu(inputs.to("cpu"), offset.to("cpu")).detach().numpy() - - self.assertTrue(np.allclose(output_gpu.flatten(), output_cpu.flatten())) - - @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") - def test_small_input(self): - device = torch.device("cuda") - for kernel_size in [3, 5]: - padding = kernel_size // 2 - N, C, H, W = shape = (1, 1, kernel_size - 1, kernel_size - 1) - - inputs = torch.rand(shape).to(device) # input size is smaller than kernel size - - offset_channels = kernel_size * kernel_size * 2 - offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - output = deform(inputs, offset) - self.assertTrue(output.shape == inputs.shape) - - mask_channels = kernel_size * kernel_size - mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device) - modulate_deform = ModulatedDeformConv( - C, C, kernel_size, padding=padding, bias=False - ).to(device) - output = modulate_deform(inputs, offset, mask) - self.assertTrue(output.shape == inputs.shape) - - @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") - def test_raise_exception(self): - device = torch.device("cuda") - N, C, H, W = shape = 1, 1, 3, 3 - kernel_size = 3 - padding = 1 - - inputs = torch.rand(shape, dtype=torch.float32).to(device) - offset_channels = kernel_size * kernel_size # This is wrong channels for offset - offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - self.assertRaises(RuntimeError, deform, inputs, offset) - - offset_channels = kernel_size * kernel_size * 2 - offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) - mask_channels = kernel_size * kernel_size * 2 # This is wrong channels for mask - mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device) - modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to( - device - ) - self.assertRaises(RuntimeError, modulate_deform, inputs, offset, mask) - - def test_repr(self): - module = DeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2) - correct_string = ( - "DeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), " - "stride=(1, 1), padding=(1, 1), dilation=(1, 1), " - "groups=1, deformable_groups=2, bias=False)" - ) - self.assertEqual(repr(module), correct_string) - - module = ModulatedDeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2) - correct_string = ( - "ModulatedDeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), " - 
"stride=1, padding=1, dilation=1, groups=1, deformable_groups=2, bias=True)" - ) - self.assertEqual(repr(module), correct_string) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/caffeinum/VToonify/vtoonify/model/stylegan/op_gpu/fused_bias_act.cpp b/spaces/caffeinum/VToonify/vtoonify/model/stylegan/op_gpu/fused_bias_act.cpp deleted file mode 100644 index 71f612cdbaaca03822eedc002a980d055d2f485c..0000000000000000000000000000000000000000 --- a/spaces/caffeinum/VToonify/vtoonify/model/stylegan/op_gpu/fused_bias_act.cpp +++ /dev/null @@ -1,32 +0,0 @@ - -#include -#include - -torch::Tensor fused_bias_act_op(const torch::Tensor &input, - const torch::Tensor &bias, - const torch::Tensor &refer, int act, int grad, - float alpha, float scale); - -#define CHECK_CUDA(x) \ - TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) \ - TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -torch::Tensor fused_bias_act(const torch::Tensor &input, - const torch::Tensor &bias, - const torch::Tensor &refer, int act, int grad, - float alpha, float scale) { - CHECK_INPUT(input); - CHECK_INPUT(bias); - - at::DeviceGuard guard(input.device()); - - return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); -} \ No newline at end of file diff --git a/spaces/camenduru-com/riffusion/README.md b/spaces/camenduru-com/riffusion/README.md deleted file mode 100644 index 840443d298d74eb922cab12e1cdfc369cd16dcce..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/riffusion/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Riffusion App -emoji: 🎶 -colorFrom: purple -colorTo: purple -sdk: docker -pinned: false ---- diff --git a/spaces/camenduru-com/webui-docker/oh-no.py b/spaces/camenduru-com/webui-docker/oh-no.py deleted file mode 100644 index 54712c828d417a827e9dc10701d13b19ba682cda..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/webui-docker/oh-no.py +++ /dev/null @@ -1,16 +0,0 @@ -import gradio as gr - -block = gr.Blocks() - -def run(): - with block: - gr.Markdown( - """ - 🐣 Please follow me for new updates https://twitter.com/camenduru
- 🧬 Please follow me for new updates https://github.com/camenduru
- 🔥 Please join our discord server https://discord.gg/k5BwmmvJJU
- """) - block.launch(server_name="0.0.0.0", server_port=7860) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/spaces/cbensimon/streamlit-ui-gallery/app.py b/spaces/cbensimon/streamlit-ui-gallery/app.py deleted file mode 100644 index d6e2538a1b9dabc92c9afceaa1acbcf30a96426a..0000000000000000000000000000000000000000 --- a/spaces/cbensimon/streamlit-ui-gallery/app.py +++ /dev/null @@ -1,159 +0,0 @@ -import streamlit as st -import plotly.figure_factory as ff -import numpy as np -import random - - -# This code is different for each deployed app. -CURRENT_THEME = "blue" -IS_DARK_THEME = True -EXPANDER_TEXT = """ - This is a custom theme. You can enable it by copying the following code - to `.streamlit/config.toml`: - ```python - [theme] - primaryColor = "#E694FF" - backgroundColor = "#00172B" - secondaryBackgroundColor = "#0083B8" - textColor = "#C6CDD4" - font = "sans-serif" - ``` - """ - - -# This code is the same for each deployed app. -st.image( - "https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/apple/271/artist-palette_1f3a8.png", - width=100, -) - -""" -# Try out Theming! -Click on the images below to view this app with different themes. -""" - -"" - -THEMES = [ - "light", - "dark", - "green", - "blue", -] -GITHUB_OWNER = "streamlit" - -# Show thumbnails for available themes. -# As html img tags here, so we can add links on them. -cols = st.columns(len(THEMES)) -for col, theme in zip(cols, THEMES): - - # Get repo name for this theme (to link to correct deployed app)- - if theme == "light": - repo = "theming-showcase" - else: - repo = f"theming-showcase-{theme}" - - # Set border of current theme to red, otherwise black or white - if theme == CURRENT_THEME: - border_color = "red" - else: - border_color = "lightgrey" if IS_DARK_THEME else "black" - - col.markdown( - #f'

{theme}

', - f'

{theme}

', - unsafe_allow_html=True, - ) - if theme in ["light", "dark"]: - theme_descriptor = theme.capitalize() + " theme" - else: - theme_descriptor = "Custom theme" - col.write(f"

<p align=center>{theme_descriptor}</p>

", unsafe_allow_html=True) - - -"" -with st.expander("Not loading?"): - st.write( - "You probably played around with themes before and overrode this app's theme. Go to ☰ -> Settings -> Theme and select *Custom Theme*." - ) -with st.expander("How can I use this theme in my app?"): - st.write(EXPANDER_TEXT) - -"" -"" - -# Draw some dummy content in main page and sidebar. -def draw_all( - key, - plot=False, -): - st.write( - """ - # Example Widgets - - These widgets don't do anything. But look at all the new colors they got 👀 - - ```python - # First some code. - streamlit = "cool" - theming = "fantastic" - both = "💥" - ``` - """ - ) - - st.checkbox("Is this cool or what?", key=str(random.randint(0, 10000))) - st.radio( - "How many balloons?", - ["1 balloon 🎈", "2 balloons 🎈🎈", "3 balloons 🎈🎈🎈"], - key=str(random.randint(0, 10000)), - ) - st.button("🤡 Click me", key=str(random.randint(0, 10000))) - - # if plot: - # st.write("Oh look, a plot:") - # x1 = np.random.randn(200) - 2 - # x2 = np.random.randn(200) - # x3 = np.random.randn(200) + 2 - - # hist_data = [x1, x2, x3] - # group_labels = ["Group 1", "Group 2", "Group 3"] - - # fig = ff.create_distplot(hist_data, group_labels, bin_size=[0.1, 0.25, 0.5]) - - # st.plotly_chart(fig, use_container_width=True) - - st.file_uploader("You can now upload with style", key=str(random.randint(0, 10000))) - st.slider( - "From 10 to 11, how cool are themes?", min_value=10, max_value=11, key=str(random.randint(0, 10000)) - ) - # st.select_slider("Pick a number", [1, 2, 3], key=key) - st.number_input("So many numbers", key=str(random.randint(0, 10000))) - st.text_area("A little writing space for you :)", key=str(random.randint(0, 10000))) - st.text_input("Text input :)", key=str(random.randint(0, 10000))) - st.selectbox( - "My favorite thing in the world is...", - ["Streamlit", "Theming", "Baloooons 🎈 "], - key=str(random.randint(0, 10000)), - ) - # st.multiselect("Pick a number", [1, 2, 3], key=key) - # st.color_picker("Colors, colors, colors", key=key) - with st.expander("Expand me!"): - st.write("Hey there! Nothing to see here 👀 ") - st.write("") - # st.write("That's our progress on theming:") - # st.progress(0.99) - if plot: - st.write("And here's some data and plots") - st.json({"data": [1, 2, 3, 4]}) - st.dataframe({"data": [1, 2, 3, 4]}) - st.table({"data": [1, 2, 3, 4]}) - st.line_chart({"data": [1, 2, 3, 4]}) - # st.help(st.write) - st.write("This is the end. Have fun building themes!") - - -draw_all("main", plot=True) - -with st.sidebar: - draw_all("sidebar") diff --git a/spaces/ccolas/TastyPiano/src/cocktails/utilities/cocktail_generation_utilities/__init__.py b/spaces/ccolas/TastyPiano/src/cocktails/utilities/cocktail_generation_utilities/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/longcode/jpgd.cpp b/spaces/cfwef/gpt/crazy_functions/test_project/cpp/longcode/jpgd.cpp deleted file mode 100644 index 36d06c8e9068570c3e7624895d474f33dbfe3d29..0000000000000000000000000000000000000000 --- a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/longcode/jpgd.cpp +++ /dev/null @@ -1,3276 +0,0 @@ -// jpgd.cpp - C++ class for JPEG decompression. -// Public domain, Rich Geldreich -// Last updated Apr. 16, 2011 -// Alex Evans: Linear memory allocator (taken from jpge.h). -// -// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2. 
-// -// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling. -// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain" -// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html - -#include "jpgd.h" -#include - -#include -// BEGIN EPIC MOD -#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0 -// END EPIC MOD - -#ifdef _MSC_VER -#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable -#endif - -// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling). -// This is slower, but results in higher quality on images with highly saturated colors. -#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1 - -#define JPGD_TRUE (1) -#define JPGD_FALSE (0) - -#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b)) -#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b)) - -namespace jpgd { - - static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); } - static inline void jpgd_free(void *p) { FMemory::Free(p); } - -// BEGIN EPIC MOD -//@UE3 - use UE3 BGRA encoding instead of assuming RGBA - // stolen from IImageWrapper.h - enum ERGBFormatJPG - { - Invalid = -1, - RGBA = 0, - BGRA = 1, - Gray = 2, - }; - static ERGBFormatJPG jpg_format; -// END EPIC MOD - - // DCT coefficients are stored in this sequence. - static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 }; - - enum JPEG_MARKER - { - M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8, - M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC, - M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7, - M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF, - M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0 - }; - - enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 }; - -#define CONST_BITS 13 -#define PASS1_BITS 2 -#define SCALEDONE ((int32)1) - -#define FIX_0_298631336 ((int32)2446) /* FIX(0.298631336) */ -#define FIX_0_390180644 ((int32)3196) /* FIX(0.390180644) */ -#define FIX_0_541196100 ((int32)4433) /* FIX(0.541196100) */ -#define FIX_0_765366865 ((int32)6270) /* FIX(0.765366865) */ -#define FIX_0_899976223 ((int32)7373) /* FIX(0.899976223) */ -#define FIX_1_175875602 ((int32)9633) /* FIX(1.175875602) */ -#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */ -#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */ -#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */ -#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */ -#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */ -#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */ - -#define DESCALE(x,n) (((x) + (SCALEDONE << ((n)-1))) >> (n)) -#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n)) - -#define MULTIPLY(var, cnst) ((var) * (cnst)) - -#define CLAMP(i) ((static_cast(i) > 255) ? 
-
-  // Compiler creates a fast path 1D IDCT for X non-zero columns
-  template <int NONZERO_COLS>
-  struct Row
-  {
-    static void idct(int* pTemp, const jpgd_block_t* pSrc)
-    {
-      // ACCESS_COL() will be optimized at compile time to either an array access, or 0.
-#define ACCESS_COL(x) (((x) < NONZERO_COLS) ? (int)pSrc[x] : 0)
-
-      const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6);
-
-      const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
-      const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
-      const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
-
-      const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS;
-      const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS;
-
-      const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
-
-      const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1);
-
-      const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3;
-      const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602);
-
-      const int az1 = MULTIPLY(bz1, - FIX_0_899976223);
-      const int az2 = MULTIPLY(bz2, - FIX_2_562915447);
-      const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5;
-      const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5;
-
-      const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3;
-      const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4;
-      const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3;
-      const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4;
-
-      pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS);
-      pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS);
-      pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS);
-      pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS);
-      pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS);
-      pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS);
-      pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS);
-      pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS);
-    }
-  };
-
-  template <>
-  struct Row<0>
-  {
-    static void idct(int* pTemp, const jpgd_block_t* pSrc)
-    {
-#ifdef _MSC_VER
-      pTemp; pSrc;
-#endif
-    }
-  };
-
-  template <>
-  struct Row<1>
-  {
-    static void idct(int* pTemp, const jpgd_block_t* pSrc)
-    {
-      const int dcval = (pSrc[0] << PASS1_BITS);
-
-      pTemp[0] = dcval;
-      pTemp[1] = dcval;
-      pTemp[2] = dcval;
-      pTemp[3] = dcval;
-      pTemp[4] = dcval;
-      pTemp[5] = dcval;
-      pTemp[6] = dcval;
-      pTemp[7] = dcval;
-    }
-  };
-
-  // Compiler creates a fast path 1D IDCT for X non-zero rows
-  template <int NONZERO_ROWS>
-  struct Col
-  {
-    static void idct(uint8* pDst_ptr, const int* pTemp)
-    {
-      // ACCESS_ROW() will be optimized at compile time to either an array access, or 0.
-#define ACCESS_ROW(x) (((x) < NONZERO_ROWS) ?
pTemp[x * 8] : 0) - - const int z2 = ACCESS_ROW(2); - const int z3 = ACCESS_ROW(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_ROW(0) + ACCESS_ROW(4)) << CONST_BITS; - const int tmp1 = (ACCESS_ROW(0) - ACCESS_ROW(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_ROW(7), atmp1 = ACCESS_ROW(5), atmp2 = ACCESS_ROW(3), atmp3 = ACCESS_ROW(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - int i = DESCALE_ZEROSHIFT(tmp10 + btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*0] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp10 - btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*7] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 + btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*1] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 - btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*6] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 + btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*2] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 - btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*5] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 + btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*3] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 - btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*4] = (uint8)CLAMP(i); - } - }; - - template <> - struct Col<1> - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - int dcval = DESCALE_ZEROSHIFT(pTemp[0], PASS1_BITS+3); - const uint8 dcval_clamped = (uint8)CLAMP(dcval); - pDst_ptr[0*8] = dcval_clamped; - pDst_ptr[1*8] = dcval_clamped; - pDst_ptr[2*8] = dcval_clamped; - pDst_ptr[3*8] = dcval_clamped; - pDst_ptr[4*8] = dcval_clamped; - pDst_ptr[5*8] = dcval_clamped; - pDst_ptr[6*8] = dcval_clamped; - pDst_ptr[7*8] = dcval_clamped; - } - }; - - static const uint8 s_idct_row_table[] = - { - 1,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0, 2,1,0,0,0,0,0,0, 2,1,1,0,0,0,0,0, 2,2,1,0,0,0,0,0, 3,2,1,0,0,0,0,0, 4,2,1,0,0,0,0,0, 4,3,1,0,0,0,0,0, - 4,3,2,0,0,0,0,0, 4,3,2,1,0,0,0,0, 4,3,2,1,1,0,0,0, 4,3,2,2,1,0,0,0, 4,3,3,2,1,0,0,0, 4,4,3,2,1,0,0,0, 5,4,3,2,1,0,0,0, 6,4,3,2,1,0,0,0, - 6,5,3,2,1,0,0,0, 6,5,4,2,1,0,0,0, 6,5,4,3,1,0,0,0, 6,5,4,3,2,0,0,0, 6,5,4,3,2,1,0,0, 6,5,4,3,2,1,1,0, 6,5,4,3,2,2,1,0, 6,5,4,3,3,2,1,0, - 6,5,4,4,3,2,1,0, 6,5,5,4,3,2,1,0, 6,6,5,4,3,2,1,0, 7,6,5,4,3,2,1,0, 8,6,5,4,3,2,1,0, 8,7,5,4,3,2,1,0, 8,7,6,4,3,2,1,0, 8,7,6,5,3,2,1,0, - 8,7,6,5,4,2,1,0, 8,7,6,5,4,3,1,0, 8,7,6,5,4,3,2,0, 8,7,6,5,4,3,2,1, 8,7,6,5,4,3,2,2, 8,7,6,5,4,3,3,2, 8,7,6,5,4,4,3,2, 8,7,6,5,5,4,3,2, - 8,7,6,6,5,4,3,2, 8,7,7,6,5,4,3,2, 8,8,7,6,5,4,3,2, 8,8,8,6,5,4,3,2, 8,8,8,7,5,4,3,2, 8,8,8,7,6,4,3,2, 8,8,8,7,6,5,3,2, 8,8,8,7,6,5,4,2, - 8,8,8,7,6,5,4,3, 8,8,8,7,6,5,4,4, 8,8,8,7,6,5,5,4, 8,8,8,7,6,6,5,4, 8,8,8,7,7,6,5,4, 8,8,8,8,7,6,5,4, 8,8,8,8,8,6,5,4, 8,8,8,8,8,7,5,4, - 
8,8,8,8,8,7,6,4, 8,8,8,8,8,7,6,5, 8,8,8,8,8,7,6,6, 8,8,8,8,8,7,7,6, 8,8,8,8,8,8,7,6, 8,8,8,8,8,8,8,6, 8,8,8,8,8,8,8,7, 8,8,8,8,8,8,8,8, - }; - - static const uint8 s_idct_col_table[] = { 1, 1, 2, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; - - void idct(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr, int block_max_zag) - { - JPGD_ASSERT(block_max_zag >= 1); - JPGD_ASSERT(block_max_zag <= 64); - - if (block_max_zag == 1) - { - int k = ((pSrc_ptr[0] + 4) >> 3) + 128; - k = CLAMP(k); - k = k | (k<<8); - k = k | (k<<16); - - for (int i = 8; i > 0; i--) - { - *(int*)&pDst_ptr[0] = k; - *(int*)&pDst_ptr[4] = k; - pDst_ptr += 8; - } - return; - } - - int temp[64]; - - const jpgd_block_t* pSrc = pSrc_ptr; - int* pTemp = temp; - - const uint8* pRow_tab = &s_idct_row_table[(block_max_zag - 1) * 8]; - int i; - for (i = 8; i > 0; i--, pRow_tab++) - { - switch (*pRow_tab) - { - case 0: Row<0>::idct(pTemp, pSrc); break; - case 1: Row<1>::idct(pTemp, pSrc); break; - case 2: Row<2>::idct(pTemp, pSrc); break; - case 3: Row<3>::idct(pTemp, pSrc); break; - case 4: Row<4>::idct(pTemp, pSrc); break; - case 5: Row<5>::idct(pTemp, pSrc); break; - case 6: Row<6>::idct(pTemp, pSrc); break; - case 7: Row<7>::idct(pTemp, pSrc); break; - case 8: Row<8>::idct(pTemp, pSrc); break; - } - - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - - const int nonzero_rows = s_idct_col_table[block_max_zag - 1]; - for (i = 8; i > 0; i--) - { - switch (nonzero_rows) - { - case 1: Col<1>::idct(pDst_ptr, pTemp); break; - case 2: Col<2>::idct(pDst_ptr, pTemp); break; - case 3: Col<3>::idct(pDst_ptr, pTemp); break; - case 4: Col<4>::idct(pDst_ptr, pTemp); break; - case 5: Col<5>::idct(pDst_ptr, pTemp); break; - case 6: Col<6>::idct(pDst_ptr, pTemp); break; - case 7: Col<7>::idct(pDst_ptr, pTemp); break; - case 8: Col<8>::idct(pDst_ptr, pTemp); break; - } - - pTemp++; - pDst_ptr++; - } - } - - void idct_4x4(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr) - { - int temp[64]; - int* pTemp = temp; - const jpgd_block_t* pSrc = pSrc_ptr; - - for (int i = 4; i > 0; i--) - { - Row<4>::idct(pTemp, pSrc); - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - for (int i = 8; i > 0; i--) - { - Col<4>::idct(pDst_ptr, pTemp); - pTemp++; - pDst_ptr++; - } - } - - // Retrieve one character from the input stream. - inline uint jpeg_decoder::get_char() - { - // Any bytes remaining in buffer? - if (!m_in_buf_left) - { - // Try to get more bytes. - prep_in_buffer(); - // Still nothing to get? - if (!m_in_buf_left) - { - // Pad the end of the stream with 0xFF 0xD9 (EOI marker) - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Same as previous method, except can indicate if the character is a pad character or not. - inline uint jpeg_decoder::get_char(bool *pPadding_flag) - { - if (!m_in_buf_left) - { - prep_in_buffer(); - if (!m_in_buf_left) - { - *pPadding_flag = true; - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - *pPadding_flag = false; - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Inserts a previously retrieved character back into the input buffer. 
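-  // Used by get_octet() and fix_in_buffer() to push look-ahead bytes back onto the stream so they can be re-read.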
- inline void jpeg_decoder::stuff_char(uint8 q) - { - *(--m_pIn_buf_ofs) = q; - m_in_buf_left++; - } - - // Retrieves one character from the input stream, but does not read past markers. Will continue to return 0xFF when a marker is encountered. - inline uint8 jpeg_decoder::get_octet() - { - bool padding_flag; - int c = get_char(&padding_flag); - - if (c == 0xFF) - { - if (padding_flag) - return 0xFF; - - c = get_char(&padding_flag); - if (padding_flag) - { - stuff_char(0xFF); - return 0xFF; - } - - if (c == 0x00) - return 0xFF; - else - { - stuff_char(static_cast(c)); - stuff_char(0xFF); - return 0xFF; - } - } - - return static_cast(c); - } - - // Retrieves a variable number of bits from the input stream. Does not recognize markers. - inline uint jpeg_decoder::get_bits(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - uint c1 = get_char(); - uint c2 = get_char(); - m_bit_buf = (m_bit_buf & 0xFFFF0000) | (c1 << 8) | c2; - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Retrieves a variable number of bits from the input stream. Markers will not be read into the input bit buffer. Instead, an infinite number of all 1's will be returned when a marker is encountered. - inline uint jpeg_decoder::get_bits_no_markers(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - if ((m_in_buf_left < 2) || (m_pIn_buf_ofs[0] == 0xFF) || (m_pIn_buf_ofs[1] == 0xFF)) - { - uint c1 = get_octet(); - uint c2 = get_octet(); - m_bit_buf |= (c1 << 8) | c2; - } - else - { - m_bit_buf |= ((uint)m_pIn_buf_ofs[0] << 8) | m_pIn_buf_ofs[1]; - m_in_buf_left -= 2; - m_pIn_buf_ofs += 2; - } - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up[m_bit_buf >> 24]) < 0) - { - // Decode more bits, use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - } - else - get_bits_no_markers(pH->code_size[symbol]); - - return symbol; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH, int& extra_bits) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up2[m_bit_buf >> 24]) < 0) - { - // Use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - - extra_bits = get_bits_no_markers(symbol & 0xF); - } - else - { - JPGD_ASSERT(((symbol >> 8) & 31) == pH->code_size[symbol & 255] + ((symbol & 0x8000) ? 
(symbol & 15) : 0)); - - if (symbol & 0x8000) - { - get_bits_no_markers((symbol >> 8) & 31); - extra_bits = symbol >> 16; - } - else - { - int code_size = (symbol >> 8) & 31; - int num_extra_bits = symbol & 0xF; - int bits = code_size + num_extra_bits; - if (bits <= (m_bits_left + 16)) - extra_bits = get_bits_no_markers(bits) & ((1 << num_extra_bits) - 1); - else - { - get_bits_no_markers(code_size); - extra_bits = get_bits_no_markers(num_extra_bits); - } - } - - symbol &= 0xFF; - } - - return symbol; - } - - // Tables and macro used to fully decode the DPCM differences. - static const int s_extend_test[16] = { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 }; - static const int s_extend_offset[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 }; - static const int s_extend_mask[] = { 0, (1<<0), (1<<1), (1<<2), (1<<3), (1<<4), (1<<5), (1<<6), (1<<7), (1<<8), (1<<9), (1<<10), (1<<11), (1<<12), (1<<13), (1<<14), (1<<15), (1<<16) }; -#define HUFF_EXTEND(x,s) ((x) < s_extend_test[s] ? (x) + s_extend_offset[s] : (x)) - - // Clamps a value between 0-255. - inline uint8 jpeg_decoder::clamp(int i) - { - if (static_cast(i) > 255) - i = (((~i) >> 31) & 0xFF); - - return static_cast(i); - } - - namespace DCT_Upsample - { - struct Matrix44 - { - typedef int Element_Type; - enum { NUM_ROWS = 4, NUM_COLS = 4 }; - - Element_Type v[NUM_ROWS][NUM_COLS]; - - inline int rows() const { return NUM_ROWS; } - inline int cols() const { return NUM_COLS; } - - inline const Element_Type & at(int r, int c) const { return v[r][c]; } - inline Element_Type & at(int r, int c) { return v[r][c]; } - - inline Matrix44() { } - - inline Matrix44& operator += (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) += a.at(r, 0); - at(r, 1) += a.at(r, 1); - at(r, 2) += a.at(r, 2); - at(r, 3) += a.at(r, 3); - } - return *this; - } - - inline Matrix44& operator -= (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) -= a.at(r, 0); - at(r, 1) -= a.at(r, 1); - at(r, 2) -= a.at(r, 2); - at(r, 3) -= a.at(r, 3); - } - return *this; - } - - friend inline Matrix44 operator + (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) + b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) + b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) + b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) + b.at(r, 3); - } - return ret; - } - - friend inline Matrix44 operator - (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) - b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) - b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) - b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) - b.at(r, 3); - } - return ret; - } - - static inline void add_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) + b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) + b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) + b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 3) + b.at(r, 3)); - } - } - - static inline void sub_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) - b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) - b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) - b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 
3) - b.at(r, 3)); - } - } - }; - - const int FRACT_BITS = 10; - const int SCALE = 1 << FRACT_BITS; - - typedef int Temp_Type; -#define D(i) (((i) + (SCALE >> 1)) >> FRACT_BITS) -#define F(i) ((int)((i) * SCALE + .5f)) - - // Any decent C++ compiler will optimize this at compile time to a 0, or an array access. -#define AT(c, r) ((((c)>=NUM_COLS)||((r)>=NUM_ROWS)) ? 0 : pSrc[(c)+(r)*8]) - - // NUM_ROWS/NUM_COLS = # of non-zero rows/cols in input matrix - template - struct P_Q - { - static void calc(Matrix44& P, Matrix44& Q, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X000 = AT(0, 0); - const Temp_Type X001 = AT(0, 1); - const Temp_Type X002 = AT(0, 2); - const Temp_Type X003 = AT(0, 3); - const Temp_Type X004 = AT(0, 4); - const Temp_Type X005 = AT(0, 5); - const Temp_Type X006 = AT(0, 6); - const Temp_Type X007 = AT(0, 7); - const Temp_Type X010 = D(F(0.415735f) * AT(1, 0) + F(0.791065f) * AT(3, 0) + F(-0.352443f) * AT(5, 0) + F(0.277785f) * AT(7, 0)); - const Temp_Type X011 = D(F(0.415735f) * AT(1, 1) + F(0.791065f) * AT(3, 1) + F(-0.352443f) * AT(5, 1) + F(0.277785f) * AT(7, 1)); - const Temp_Type X012 = D(F(0.415735f) * AT(1, 2) + F(0.791065f) * AT(3, 2) + F(-0.352443f) * AT(5, 2) + F(0.277785f) * AT(7, 2)); - const Temp_Type X013 = D(F(0.415735f) * AT(1, 3) + F(0.791065f) * AT(3, 3) + F(-0.352443f) * AT(5, 3) + F(0.277785f) * AT(7, 3)); - const Temp_Type X014 = D(F(0.415735f) * AT(1, 4) + F(0.791065f) * AT(3, 4) + F(-0.352443f) * AT(5, 4) + F(0.277785f) * AT(7, 4)); - const Temp_Type X015 = D(F(0.415735f) * AT(1, 5) + F(0.791065f) * AT(3, 5) + F(-0.352443f) * AT(5, 5) + F(0.277785f) * AT(7, 5)); - const Temp_Type X016 = D(F(0.415735f) * AT(1, 6) + F(0.791065f) * AT(3, 6) + F(-0.352443f) * AT(5, 6) + F(0.277785f) * AT(7, 6)); - const Temp_Type X017 = D(F(0.415735f) * AT(1, 7) + F(0.791065f) * AT(3, 7) + F(-0.352443f) * AT(5, 7) + F(0.277785f) * AT(7, 7)); - const Temp_Type X020 = AT(4, 0); - const Temp_Type X021 = AT(4, 1); - const Temp_Type X022 = AT(4, 2); - const Temp_Type X023 = AT(4, 3); - const Temp_Type X024 = AT(4, 4); - const Temp_Type X025 = AT(4, 5); - const Temp_Type X026 = AT(4, 6); - const Temp_Type X027 = AT(4, 7); - const Temp_Type X030 = D(F(0.022887f) * AT(1, 0) + F(-0.097545f) * AT(3, 0) + F(0.490393f) * AT(5, 0) + F(0.865723f) * AT(7, 0)); - const Temp_Type X031 = D(F(0.022887f) * AT(1, 1) + F(-0.097545f) * AT(3, 1) + F(0.490393f) * AT(5, 1) + F(0.865723f) * AT(7, 1)); - const Temp_Type X032 = D(F(0.022887f) * AT(1, 2) + F(-0.097545f) * AT(3, 2) + F(0.490393f) * AT(5, 2) + F(0.865723f) * AT(7, 2)); - const Temp_Type X033 = D(F(0.022887f) * AT(1, 3) + F(-0.097545f) * AT(3, 3) + F(0.490393f) * AT(5, 3) + F(0.865723f) * AT(7, 3)); - const Temp_Type X034 = D(F(0.022887f) * AT(1, 4) + F(-0.097545f) * AT(3, 4) + F(0.490393f) * AT(5, 4) + F(0.865723f) * AT(7, 4)); - const Temp_Type X035 = D(F(0.022887f) * AT(1, 5) + F(-0.097545f) * AT(3, 5) + F(0.490393f) * AT(5, 5) + F(0.865723f) * AT(7, 5)); - const Temp_Type X036 = D(F(0.022887f) * AT(1, 6) + F(-0.097545f) * AT(3, 6) + F(0.490393f) * AT(5, 6) + F(0.865723f) * AT(7, 6)); - const Temp_Type X037 = D(F(0.022887f) * AT(1, 7) + F(-0.097545f) * AT(3, 7) + F(0.490393f) * AT(5, 7) + F(0.865723f) * AT(7, 7)); - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - P.at(0, 0) = X000; - P.at(0, 1) = D(X001 * F(0.415735f) + X003 * F(0.791065f) + X005 * F(-0.352443f) + X007 * F(0.277785f)); - P.at(0, 2) = X004; - P.at(0, 3) = D(X001 * F(0.022887f) + X003 * F(-0.097545f) + X005 * 
F(0.490393f) + X007 * F(0.865723f)); - P.at(1, 0) = X010; - P.at(1, 1) = D(X011 * F(0.415735f) + X013 * F(0.791065f) + X015 * F(-0.352443f) + X017 * F(0.277785f)); - P.at(1, 2) = X014; - P.at(1, 3) = D(X011 * F(0.022887f) + X013 * F(-0.097545f) + X015 * F(0.490393f) + X017 * F(0.865723f)); - P.at(2, 0) = X020; - P.at(2, 1) = D(X021 * F(0.415735f) + X023 * F(0.791065f) + X025 * F(-0.352443f) + X027 * F(0.277785f)); - P.at(2, 2) = X024; - P.at(2, 3) = D(X021 * F(0.022887f) + X023 * F(-0.097545f) + X025 * F(0.490393f) + X027 * F(0.865723f)); - P.at(3, 0) = X030; - P.at(3, 1) = D(X031 * F(0.415735f) + X033 * F(0.791065f) + X035 * F(-0.352443f) + X037 * F(0.277785f)); - P.at(3, 2) = X034; - P.at(3, 3) = D(X031 * F(0.022887f) + X033 * F(-0.097545f) + X035 * F(0.490393f) + X037 * F(0.865723f)); - // 40 muls 24 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - Q.at(0, 0) = D(X001 * F(0.906127f) + X003 * F(-0.318190f) + X005 * F(0.212608f) + X007 * F(-0.180240f)); - Q.at(0, 1) = X002; - Q.at(0, 2) = D(X001 * F(-0.074658f) + X003 * F(0.513280f) + X005 * F(0.768178f) + X007 * F(-0.375330f)); - Q.at(0, 3) = X006; - Q.at(1, 0) = D(X011 * F(0.906127f) + X013 * F(-0.318190f) + X015 * F(0.212608f) + X017 * F(-0.180240f)); - Q.at(1, 1) = X012; - Q.at(1, 2) = D(X011 * F(-0.074658f) + X013 * F(0.513280f) + X015 * F(0.768178f) + X017 * F(-0.375330f)); - Q.at(1, 3) = X016; - Q.at(2, 0) = D(X021 * F(0.906127f) + X023 * F(-0.318190f) + X025 * F(0.212608f) + X027 * F(-0.180240f)); - Q.at(2, 1) = X022; - Q.at(2, 2) = D(X021 * F(-0.074658f) + X023 * F(0.513280f) + X025 * F(0.768178f) + X027 * F(-0.375330f)); - Q.at(2, 3) = X026; - Q.at(3, 0) = D(X031 * F(0.906127f) + X033 * F(-0.318190f) + X035 * F(0.212608f) + X037 * F(-0.180240f)); - Q.at(3, 1) = X032; - Q.at(3, 2) = D(X031 * F(-0.074658f) + X033 * F(0.513280f) + X035 * F(0.768178f) + X037 * F(-0.375330f)); - Q.at(3, 3) = X036; - // 40 muls 24 adds - } - }; - - template - struct R_S - { - static void calc(Matrix44& R, Matrix44& S, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X100 = D(F(0.906127f) * AT(1, 0) + F(-0.318190f) * AT(3, 0) + F(0.212608f) * AT(5, 0) + F(-0.180240f) * AT(7, 0)); - const Temp_Type X101 = D(F(0.906127f) * AT(1, 1) + F(-0.318190f) * AT(3, 1) + F(0.212608f) * AT(5, 1) + F(-0.180240f) * AT(7, 1)); - const Temp_Type X102 = D(F(0.906127f) * AT(1, 2) + F(-0.318190f) * AT(3, 2) + F(0.212608f) * AT(5, 2) + F(-0.180240f) * AT(7, 2)); - const Temp_Type X103 = D(F(0.906127f) * AT(1, 3) + F(-0.318190f) * AT(3, 3) + F(0.212608f) * AT(5, 3) + F(-0.180240f) * AT(7, 3)); - const Temp_Type X104 = D(F(0.906127f) * AT(1, 4) + F(-0.318190f) * AT(3, 4) + F(0.212608f) * AT(5, 4) + F(-0.180240f) * AT(7, 4)); - const Temp_Type X105 = D(F(0.906127f) * AT(1, 5) + F(-0.318190f) * AT(3, 5) + F(0.212608f) * AT(5, 5) + F(-0.180240f) * AT(7, 5)); - const Temp_Type X106 = D(F(0.906127f) * AT(1, 6) + F(-0.318190f) * AT(3, 6) + F(0.212608f) * AT(5, 6) + F(-0.180240f) * AT(7, 6)); - const Temp_Type X107 = D(F(0.906127f) * AT(1, 7) + F(-0.318190f) * AT(3, 7) + F(0.212608f) * AT(5, 7) + F(-0.180240f) * AT(7, 7)); - const Temp_Type X110 = AT(2, 0); - const Temp_Type X111 = AT(2, 1); - const Temp_Type X112 = AT(2, 2); - const Temp_Type X113 = AT(2, 3); - const Temp_Type X114 = AT(2, 4); - const Temp_Type X115 = AT(2, 5); - const Temp_Type X116 = AT(2, 6); - const Temp_Type X117 = AT(2, 7); - const Temp_Type X120 = D(F(-0.074658f) * AT(1, 0) + F(0.513280f) * AT(3, 0) + F(0.768178f) * AT(5, 0) + F(-0.375330f) * AT(7, 0)); - 
const Temp_Type X121 = D(F(-0.074658f) * AT(1, 1) + F(0.513280f) * AT(3, 1) + F(0.768178f) * AT(5, 1) + F(-0.375330f) * AT(7, 1)); - const Temp_Type X122 = D(F(-0.074658f) * AT(1, 2) + F(0.513280f) * AT(3, 2) + F(0.768178f) * AT(5, 2) + F(-0.375330f) * AT(7, 2)); - const Temp_Type X123 = D(F(-0.074658f) * AT(1, 3) + F(0.513280f) * AT(3, 3) + F(0.768178f) * AT(5, 3) + F(-0.375330f) * AT(7, 3)); - const Temp_Type X124 = D(F(-0.074658f) * AT(1, 4) + F(0.513280f) * AT(3, 4) + F(0.768178f) * AT(5, 4) + F(-0.375330f) * AT(7, 4)); - const Temp_Type X125 = D(F(-0.074658f) * AT(1, 5) + F(0.513280f) * AT(3, 5) + F(0.768178f) * AT(5, 5) + F(-0.375330f) * AT(7, 5)); - const Temp_Type X126 = D(F(-0.074658f) * AT(1, 6) + F(0.513280f) * AT(3, 6) + F(0.768178f) * AT(5, 6) + F(-0.375330f) * AT(7, 6)); - const Temp_Type X127 = D(F(-0.074658f) * AT(1, 7) + F(0.513280f) * AT(3, 7) + F(0.768178f) * AT(5, 7) + F(-0.375330f) * AT(7, 7)); - const Temp_Type X130 = AT(6, 0); - const Temp_Type X131 = AT(6, 1); - const Temp_Type X132 = AT(6, 2); - const Temp_Type X133 = AT(6, 3); - const Temp_Type X134 = AT(6, 4); - const Temp_Type X135 = AT(6, 5); - const Temp_Type X136 = AT(6, 6); - const Temp_Type X137 = AT(6, 7); - // 80 muls 48 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - R.at(0, 0) = X100; - R.at(0, 1) = D(X101 * F(0.415735f) + X103 * F(0.791065f) + X105 * F(-0.352443f) + X107 * F(0.277785f)); - R.at(0, 2) = X104; - R.at(0, 3) = D(X101 * F(0.022887f) + X103 * F(-0.097545f) + X105 * F(0.490393f) + X107 * F(0.865723f)); - R.at(1, 0) = X110; - R.at(1, 1) = D(X111 * F(0.415735f) + X113 * F(0.791065f) + X115 * F(-0.352443f) + X117 * F(0.277785f)); - R.at(1, 2) = X114; - R.at(1, 3) = D(X111 * F(0.022887f) + X113 * F(-0.097545f) + X115 * F(0.490393f) + X117 * F(0.865723f)); - R.at(2, 0) = X120; - R.at(2, 1) = D(X121 * F(0.415735f) + X123 * F(0.791065f) + X125 * F(-0.352443f) + X127 * F(0.277785f)); - R.at(2, 2) = X124; - R.at(2, 3) = D(X121 * F(0.022887f) + X123 * F(-0.097545f) + X125 * F(0.490393f) + X127 * F(0.865723f)); - R.at(3, 0) = X130; - R.at(3, 1) = D(X131 * F(0.415735f) + X133 * F(0.791065f) + X135 * F(-0.352443f) + X137 * F(0.277785f)); - R.at(3, 2) = X134; - R.at(3, 3) = D(X131 * F(0.022887f) + X133 * F(-0.097545f) + X135 * F(0.490393f) + X137 * F(0.865723f)); - // 40 muls 24 adds - // 4x4 = 4x8 times 8x4, matrix 1 is constant - S.at(0, 0) = D(X101 * F(0.906127f) + X103 * F(-0.318190f) + X105 * F(0.212608f) + X107 * F(-0.180240f)); - S.at(0, 1) = X102; - S.at(0, 2) = D(X101 * F(-0.074658f) + X103 * F(0.513280f) + X105 * F(0.768178f) + X107 * F(-0.375330f)); - S.at(0, 3) = X106; - S.at(1, 0) = D(X111 * F(0.906127f) + X113 * F(-0.318190f) + X115 * F(0.212608f) + X117 * F(-0.180240f)); - S.at(1, 1) = X112; - S.at(1, 2) = D(X111 * F(-0.074658f) + X113 * F(0.513280f) + X115 * F(0.768178f) + X117 * F(-0.375330f)); - S.at(1, 3) = X116; - S.at(2, 0) = D(X121 * F(0.906127f) + X123 * F(-0.318190f) + X125 * F(0.212608f) + X127 * F(-0.180240f)); - S.at(2, 1) = X122; - S.at(2, 2) = D(X121 * F(-0.074658f) + X123 * F(0.513280f) + X125 * F(0.768178f) + X127 * F(-0.375330f)); - S.at(2, 3) = X126; - S.at(3, 0) = D(X131 * F(0.906127f) + X133 * F(-0.318190f) + X135 * F(0.212608f) + X137 * F(-0.180240f)); - S.at(3, 1) = X132; - S.at(3, 2) = D(X131 * F(-0.074658f) + X133 * F(0.513280f) + X135 * F(0.768178f) + X137 * F(-0.375330f)); - S.at(3, 3) = X136; - // 40 muls 24 adds - } - }; - } // end namespace DCT_Upsample - - // Unconditionally frees all allocated m_blocks. 
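-  // Every allocation made by the decoder comes from the m_pMem_blocks pool (see alloc() below),
-  // so walking that single list releases all of the decoder's memory in one pass.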
-  void jpeg_decoder::free_all_blocks()
-  {
-    m_pStream = NULL;
-    for (mem_block *b = m_pMem_blocks; b; )
-    {
-      mem_block *n = b->m_pNext;
-      jpgd_free(b);
-      b = n;
-    }
-    m_pMem_blocks = NULL;
-  }
-
-  // This method handles all errors.
-  // It could easily be changed to use C++ exceptions.
-  void jpeg_decoder::stop_decoding(jpgd_status status)
-  {
-    m_error_code = status;
-    free_all_blocks();
-    longjmp(m_jmp_state, status);
-
-    // we shouldn't get here as longjmp shouldn't return, but we put it here to make it explicit
-    // that this function doesn't return, otherwise we get this error:
-    //
-    // error : function declared 'noreturn' should not return
-    exit(1);
-  }
-
-  void *jpeg_decoder::alloc(size_t nSize, bool zero)
-  {
-    nSize = (JPGD_MAX(nSize, 1) + 3) & ~3;
-    char *rv = NULL;
-    for (mem_block *b = m_pMem_blocks; b; b = b->m_pNext)
-    {
-      if ((b->m_used_count + nSize) <= b->m_size)
-      {
-        rv = b->m_data + b->m_used_count;
-        b->m_used_count += nSize;
-        break;
-      }
-    }
-    if (!rv)
-    {
-      int capacity = JPGD_MAX(32768 - 256, (nSize + 2047) & ~2047);
-      mem_block *b = (mem_block*)jpgd_malloc(sizeof(mem_block) + capacity);
-      if (!b) stop_decoding(JPGD_NOTENOUGHMEM);
-      b->m_pNext = m_pMem_blocks; m_pMem_blocks = b;
-      b->m_used_count = nSize;
-      b->m_size = capacity;
-      rv = b->m_data;
-    }
-    if (zero) memset(rv, 0, nSize);
-    return rv;
-  }
-
-  void jpeg_decoder::word_clear(void *p, uint16 c, uint n)
-  {
-    uint8 *pD = (uint8*)p;
-    const uint8 l = c & 0xFF, h = (c >> 8) & 0xFF;
-    while (n)
-    {
-      pD[0] = l; pD[1] = h; pD += 2;
-      n--;
-    }
-  }
-
-  // Refill the input buffer.
-  // This method will sit in a loop until (A) the buffer is full or (B)
-  // the stream's read() method reports an end of file condition.
-  void jpeg_decoder::prep_in_buffer()
-  {
-    m_in_buf_left = 0;
-    m_pIn_buf_ofs = m_in_buf;
-
-    if (m_eof_flag)
-      return;
-
-    do
-    {
-      int bytes_read = m_pStream->read(m_in_buf + m_in_buf_left, JPGD_IN_BUF_SIZE - m_in_buf_left, &m_eof_flag);
-      if (bytes_read == -1)
-        stop_decoding(JPGD_STREAM_READ);
-
-      m_in_buf_left += bytes_read;
-    } while ((m_in_buf_left < JPGD_IN_BUF_SIZE) && (!m_eof_flag));
-
-    m_total_bytes_read += m_in_buf_left;
-
-    // Pad the end of the block with M_EOI (prevents the decompressor from going off the rails if the stream is invalid).
-    // (This dates way back to when this decompressor was written in C/asm, and the all-asm Huffman decoder did some fancy things to increase perf.)
-    word_clear(m_pIn_buf_ofs + m_in_buf_left, 0xD9FF, 64);
-  }
-
-  // Read a Huffman code table.
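-  // DHT payload: a 2-byte length, then for each table an index byte (bit 4 set = AC table,
-  // low nibble = table slot), 16 per-code-length counts, and then the symbol values.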
- void jpeg_decoder::read_dht_marker() - { - int i, index, count; - uint8 huff_num[17]; - uint8 huff_val[256]; - - uint num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= 2; - - while (num_left) - { - index = get_bits(8); - - huff_num[0] = 0; - - count = 0; - - for (i = 1; i <= 16; i++) - { - huff_num[i] = static_cast(get_bits(8)); - count += huff_num[i]; - } - - if (count > 255) - stop_decoding(JPGD_BAD_DHT_COUNTS); - - for (i = 0; i < count; i++) - huff_val[i] = static_cast(get_bits(8)); - - i = 1 + 16 + count; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= i; - - if ((index & 0x10) > 0x10) - stop_decoding(JPGD_BAD_DHT_INDEX); - - index = (index & 0x0F) + ((index & 0x10) >> 4) * (JPGD_MAX_HUFF_TABLES >> 1); - - if (index >= JPGD_MAX_HUFF_TABLES) - stop_decoding(JPGD_BAD_DHT_INDEX); - - if (!m_huff_num[index]) - m_huff_num[index] = (uint8 *)alloc(17); - - if (!m_huff_val[index]) - m_huff_val[index] = (uint8 *)alloc(256); - - m_huff_ac[index] = (index & 0x10) != 0; - memcpy(m_huff_num[index], huff_num, 17); - memcpy(m_huff_val[index], huff_val, 256); - } - } - - // Read a quantization table. - void jpeg_decoder::read_dqt_marker() - { - int n, i, prec; - uint num_left; - uint temp; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DQT_MARKER); - - num_left -= 2; - - while (num_left) - { - n = get_bits(8); - prec = n >> 4; - n &= 0x0F; - - if (n >= JPGD_MAX_QUANT_TABLES) - stop_decoding(JPGD_BAD_DQT_TABLE); - - if (!m_quant[n]) - m_quant[n] = (jpgd_quant_t *)alloc(64 * sizeof(jpgd_quant_t)); - - // read quantization entries, in zag order - for (i = 0; i < 64; i++) - { - temp = get_bits(8); - - if (prec) - temp = (temp << 8) + get_bits(8); - - m_quant[n][i] = static_cast(temp); - } - - i = 64 + 1; - - if (prec) - i += 64; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DQT_LENGTH); - - num_left -= i; - } - } - - // Read the start of frame (SOF) marker. - void jpeg_decoder::read_sof_marker() - { - int i; - uint num_left; - - num_left = get_bits(16); - - if (get_bits(8) != 8) /* precision: sorry, only 8-bit precision is supported right now */ - stop_decoding(JPGD_BAD_PRECISION); - - m_image_y_size = get_bits(16); - - if ((m_image_y_size < 1) || (m_image_y_size > JPGD_MAX_HEIGHT)) - stop_decoding(JPGD_BAD_HEIGHT); - - m_image_x_size = get_bits(16); - - if ((m_image_x_size < 1) || (m_image_x_size > JPGD_MAX_WIDTH)) - stop_decoding(JPGD_BAD_WIDTH); - - m_comps_in_frame = get_bits(8); - - if (m_comps_in_frame > JPGD_MAX_COMPONENTS) - stop_decoding(JPGD_TOO_MANY_COMPONENTS); - - if (num_left != (uint)(m_comps_in_frame * 3 + 8)) - stop_decoding(JPGD_BAD_SOF_LENGTH); - - for (i = 0; i < m_comps_in_frame; i++) - { - m_comp_ident[i] = get_bits(8); - m_comp_h_samp[i] = get_bits(4); - m_comp_v_samp[i] = get_bits(4); - m_comp_quant[i] = get_bits(8); - } - } - - // Used to skip unrecognized markers. - void jpeg_decoder::skip_variable_marker() - { - uint num_left; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_VARIABLE_MARKER); - - num_left -= 2; - - while (num_left) - { - get_bits(8); - num_left--; - } - } - - // Read a define restart interval (DRI) marker. - void jpeg_decoder::read_dri_marker() - { - if (get_bits(16) != 4) - stop_decoding(JPGD_BAD_DRI_LENGTH); - - m_restart_interval = get_bits(16); - } - - // Read a start of scan (SOS) marker. 
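-  // SOS payload: component count, then an (id, DC/AC table selector) pair per component,
-  // followed by spectral start/end and the successive approximation high/low nibbles
-  // (the last three are only meaningful for progressive scans).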
- void jpeg_decoder::read_sos_marker() - { - uint num_left; - int i, ci, n, c, cc; - - num_left = get_bits(16); - - n = get_bits(8); - - m_comps_in_scan = n; - - num_left -= 3; - - if ( (num_left != (uint)(n * 2 + 3)) || (n < 1) || (n > JPGD_MAX_COMPS_IN_SCAN) ) - stop_decoding(JPGD_BAD_SOS_LENGTH); - - for (i = 0; i < n; i++) - { - cc = get_bits(8); - c = get_bits(8); - num_left -= 2; - - for (ci = 0; ci < m_comps_in_frame; ci++) - if (cc == m_comp_ident[ci]) - break; - - if (ci >= m_comps_in_frame) - stop_decoding(JPGD_BAD_SOS_COMP_ID); - - m_comp_list[i] = ci; - m_comp_dc_tab[ci] = (c >> 4) & 15; - m_comp_ac_tab[ci] = (c & 15) + (JPGD_MAX_HUFF_TABLES >> 1); - } - - m_spectral_start = get_bits(8); - m_spectral_end = get_bits(8); - m_successive_high = get_bits(4); - m_successive_low = get_bits(4); - - if (!m_progressive_flag) - { - m_spectral_start = 0; - m_spectral_end = 63; - } - - num_left -= 3; - - while (num_left) /* read past whatever is num_left */ - { - get_bits(8); - num_left--; - } - } - - // Finds the next marker. - int jpeg_decoder::next_marker() - { - uint c, bytes; - - bytes = 0; - - do - { - do - { - bytes++; - c = get_bits(8); - } while (c != 0xFF); - - do - { - c = get_bits(8); - } while (c == 0xFF); - - } while (c == 0); - - // If bytes > 0 here, there where extra bytes before the marker (not good). - - return c; - } - - // Process markers. Returns when an SOFx, SOI, EOI, or SOS marker is - // encountered. - int jpeg_decoder::process_markers() - { - int c; - - for ( ; ; ) - { - c = next_marker(); - - switch (c) - { - case M_SOF0: - case M_SOF1: - case M_SOF2: - case M_SOF3: - case M_SOF5: - case M_SOF6: - case M_SOF7: - // case M_JPG: - case M_SOF9: - case M_SOF10: - case M_SOF11: - case M_SOF13: - case M_SOF14: - case M_SOF15: - case M_SOI: - case M_EOI: - case M_SOS: - { - return c; - } - case M_DHT: - { - read_dht_marker(); - break; - } - // No arithmitic support - dumb patents! - case M_DAC: - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - case M_DQT: - { - read_dqt_marker(); - break; - } - case M_DRI: - { - read_dri_marker(); - break; - } - //case M_APP0: /* no need to read the JFIF marker */ - - case M_JPG: - case M_RST0: /* no parameters */ - case M_RST1: - case M_RST2: - case M_RST3: - case M_RST4: - case M_RST5: - case M_RST6: - case M_RST7: - case M_TEM: - { - stop_decoding(JPGD_UNEXPECTED_MARKER); - break; - } - default: /* must be DNL, DHP, EXP, APPn, JPGn, COM, or RESn or APP0 */ - { - skip_variable_marker(); - break; - } - } - } - } - - // Finds the start of image (SOI) marker. - // This code is rather defensive: it only checks the first 512 bytes to avoid - // false positives. - void jpeg_decoder::locate_soi_marker() - { - uint lastchar, thischar; - uint bytesleft; - - lastchar = get_bits(8); - - thischar = get_bits(8); - - /* ok if it's a normal JPEG file without a special header */ - - if ((lastchar == 0xFF) && (thischar == M_SOI)) - return; - - bytesleft = 4096; //512; - - for ( ; ; ) - { - if (--bytesleft == 0) - stop_decoding(JPGD_NOT_JPEG); - - lastchar = thischar; - - thischar = get_bits(8); - - if (lastchar == 0xFF) - { - if (thischar == M_SOI) - break; - else if (thischar == M_EOI) // get_bits will keep returning M_EOI if we read past the end - stop_decoding(JPGD_NOT_JPEG); - } - } - - // Check the next character after marker: if it's not 0xFF, it can't be the start of the next marker, so the file is bad. 
- thischar = (m_bit_buf >> 24) & 0xFF; - - if (thischar != 0xFF) - stop_decoding(JPGD_NOT_JPEG); - } - - // Find a start of frame (SOF) marker. - void jpeg_decoder::locate_sof_marker() - { - locate_soi_marker(); - - int c = process_markers(); - - switch (c) - { - case M_SOF2: - m_progressive_flag = JPGD_TRUE; - case M_SOF0: /* baseline DCT */ - case M_SOF1: /* extended sequential DCT */ - { - read_sof_marker(); - break; - } - case M_SOF9: /* Arithmitic coding */ - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - default: - { - stop_decoding(JPGD_UNSUPPORTED_MARKER); - break; - } - } - } - - // Find a start of scan (SOS) marker. - int jpeg_decoder::locate_sos_marker() - { - int c; - - c = process_markers(); - - if (c == M_EOI) - return JPGD_FALSE; - else if (c != M_SOS) - stop_decoding(JPGD_UNEXPECTED_MARKER); - - read_sos_marker(); - - return JPGD_TRUE; - } - - // Reset everything to default/uninitialized state. - void jpeg_decoder::init(jpeg_decoder_stream *pStream) - { - m_pMem_blocks = NULL; - m_error_code = JPGD_SUCCESS; - m_ready_flag = false; - m_image_x_size = m_image_y_size = 0; - m_pStream = pStream; - m_progressive_flag = JPGD_FALSE; - - memset(m_huff_ac, 0, sizeof(m_huff_ac)); - memset(m_huff_num, 0, sizeof(m_huff_num)); - memset(m_huff_val, 0, sizeof(m_huff_val)); - memset(m_quant, 0, sizeof(m_quant)); - - m_scan_type = 0; - m_comps_in_frame = 0; - - memset(m_comp_h_samp, 0, sizeof(m_comp_h_samp)); - memset(m_comp_v_samp, 0, sizeof(m_comp_v_samp)); - memset(m_comp_quant, 0, sizeof(m_comp_quant)); - memset(m_comp_ident, 0, sizeof(m_comp_ident)); - memset(m_comp_h_blocks, 0, sizeof(m_comp_h_blocks)); - memset(m_comp_v_blocks, 0, sizeof(m_comp_v_blocks)); - - m_comps_in_scan = 0; - memset(m_comp_list, 0, sizeof(m_comp_list)); - memset(m_comp_dc_tab, 0, sizeof(m_comp_dc_tab)); - memset(m_comp_ac_tab, 0, sizeof(m_comp_ac_tab)); - - m_spectral_start = 0; - m_spectral_end = 0; - m_successive_low = 0; - m_successive_high = 0; - m_max_mcu_x_size = 0; - m_max_mcu_y_size = 0; - m_blocks_per_mcu = 0; - m_max_blocks_per_row = 0; - m_mcus_per_row = 0; - m_mcus_per_col = 0; - m_expanded_blocks_per_component = 0; - m_expanded_blocks_per_mcu = 0; - m_expanded_blocks_per_row = 0; - m_freq_domain_chroma_upsample = false; - - memset(m_mcu_org, 0, sizeof(m_mcu_org)); - - m_total_lines_left = 0; - m_mcu_lines_left = 0; - m_real_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_pixel = 0; - - memset(m_pHuff_tabs, 0, sizeof(m_pHuff_tabs)); - - memset(m_dc_coeffs, 0, sizeof(m_dc_coeffs)); - memset(m_ac_coeffs, 0, sizeof(m_ac_coeffs)); - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_eob_run = 0; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_pIn_buf_ofs = m_in_buf; - m_in_buf_left = 0; - m_eof_flag = false; - m_tem_flag = 0; - - memset(m_in_buf_pad_start, 0, sizeof(m_in_buf_pad_start)); - memset(m_in_buf, 0, sizeof(m_in_buf)); - memset(m_in_buf_pad_end, 0, sizeof(m_in_buf_pad_end)); - - m_restart_interval = 0; - m_restarts_left = 0; - m_next_restart_num = 0; - - m_max_mcus_per_row = 0; - m_max_blocks_per_mcu = 0; - m_max_mcus_per_col = 0; - - memset(m_last_dc_val, 0, sizeof(m_last_dc_val)); - m_pMCU_coefficients = NULL; - m_pSample_buf = NULL; - - m_total_bytes_read = 0; - - m_pScan_line_0 = NULL; - m_pScan_line_1 = NULL; - - // Ready the input buffer. - prep_in_buffer(); - - // Prime the bit buffer. 
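-    // Two 16-bit reads below fill the 32-bit m_bit_buf before decoding begins.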
-    m_bits_left = 16;
-    m_bit_buf = 0;
-
-    get_bits(16);
-    get_bits(16);
-
-    for (int i = 0; i < JPGD_MAX_BLOCKS_PER_MCU; i++)
-      m_mcu_block_max_zag[i] = 64;
-  }
-
-#define SCALEBITS 16
-#define ONE_HALF ((int) 1 << (SCALEBITS-1))
-#define FIX(x) ((int) ((x) * (1L<<SCALEBITS) + 0.5f))
-
-  // Create a few tables that allow us to quickly convert YCbCr to RGB.
-  void jpeg_decoder::create_look_ups()
-  {
-    for (int i = 0; i <= 255; i++)
-    {
-      int k = i - 128;
-      m_crr[i] = ( FIX(1.40200f) * k + ONE_HALF) >> SCALEBITS;
-      m_cbb[i] = ( FIX(1.77200f) * k + ONE_HALF) >> SCALEBITS;
-      m_crg[i] = (-FIX(0.71414f)) * k;
-      m_cbg[i] = (-FIX(0.34414f)) * k + ONE_HALF;
-    }
-  }
-
-  // This method throws back into the stream any bytes that were read
-  // into the bit buffer during initial marker scanning.
-  void jpeg_decoder::fix_in_buffer()
-  {
-    // In case any 0xFF's were pulled into the buffer during marker scanning.
-    JPGD_ASSERT((m_bits_left & 7) == 0);
-
-    if (m_bits_left == 16)
-      stuff_char( (uint8)(m_bit_buf & 0xFF));
-
-    if (m_bits_left >= 8)
-      stuff_char( (uint8)((m_bit_buf >> 8) & 0xFF));
-
-    stuff_char((uint8)((m_bit_buf >> 16) & 0xFF));
-    stuff_char((uint8)((m_bit_buf >> 24) & 0xFF));
-
-    m_bits_left = 16;
-    get_bits_no_markers(16);
-    get_bits_no_markers(16);
-  }
-
-  void jpeg_decoder::transform_mcu(int mcu_row)
-  {
-    jpgd_block_t* pSrc_ptr = m_pMCU_coefficients;
-    uint8* pDst_ptr = m_pSample_buf + mcu_row * m_blocks_per_mcu * 64;
-
-    for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
-    {
-      idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]);
-      pSrc_ptr += 64;
-      pDst_ptr += 64;
-    }
-  }
-
-  static const uint8 s_max_rc[64] =
-  {
-    17, 18, 34, 50, 50, 51, 52, 52, 52, 68, 84, 84, 84, 84, 85, 86, 86, 86, 86, 86,
-    102, 118, 118, 118, 118, 118, 118, 119, 120, 120, 120, 120, 120, 120, 120, 136,
-    136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136,
-    136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136
-  };
-
-  void jpeg_decoder::transform_mcu_expand(int mcu_row)
-  {
-    jpgd_block_t* pSrc_ptr = m_pMCU_coefficients;
-    uint8* pDst_ptr = m_pSample_buf + mcu_row * m_expanded_blocks_per_mcu * 64;
-
-    // Y IDCT
-    int mcu_block;
-    for (mcu_block = 0; mcu_block < m_expanded_blocks_per_component; mcu_block++)
-    {
-      idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]);
-      pSrc_ptr += 64;
-      pDst_ptr += 64;
-    }
-
-    // Chroma IDCT, with upsampling
-    jpgd_block_t temp_block[64];
-
-    for (int i = 0; i < 2; i++)
-    {
-      DCT_Upsample::Matrix44 P, Q, R, S;
-
-      JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] >= 1);
-      JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] <= 64);
-
-      switch (s_max_rc[m_mcu_block_max_zag[mcu_block++] - 1])
-      {
-      case 1*16+1:
-        DCT_Upsample::P_Q<1, 1>::calc(P, Q, pSrc_ptr);
-        DCT_Upsample::R_S<1, 1>::calc(R, S, pSrc_ptr);
-        break;
-      case 1*16+2:
-        DCT_Upsample::P_Q<1, 2>::calc(P, Q, pSrc_ptr);
-        DCT_Upsample::R_S<1, 2>::calc(R, S, pSrc_ptr);
-        break;
-      case 2*16+2:
-        DCT_Upsample::P_Q<2, 2>::calc(P, Q, pSrc_ptr);
-        DCT_Upsample::R_S<2, 2>::calc(R, S, pSrc_ptr);
-        break;
-      case 3*16+2:
-        DCT_Upsample::P_Q<3, 2>::calc(P, Q, pSrc_ptr);
-        DCT_Upsample::R_S<3, 2>::calc(R, S, pSrc_ptr);
-        break;
-      case 3*16+3:
-        DCT_Upsample::P_Q<3, 3>::calc(P, Q, pSrc_ptr);
-        DCT_Upsample::R_S<3, 3>::calc(R, S, pSrc_ptr);
-        break;
-      case 3*16+4:
-        DCT_Upsample::P_Q<3, 4>::calc(P, Q, pSrc_ptr);
-        DCT_Upsample::R_S<3, 4>::calc(R, S, pSrc_ptr);
-        break;
-      case 4*16+4:
-        DCT_Upsample::P_Q<4, 4>::calc(P, Q, pSrc_ptr);
-        DCT_Upsample::R_S<4, 4>::calc(R, S, pSrc_ptr);
-        break;
-      case 5*16+4:
-        DCT_Upsample::P_Q<5, 4>::calc(P, Q, pSrc_ptr);
-        DCT_Upsample::R_S<5, 4>::calc(R, S, pSrc_ptr);
-        break;
-      case 5*16+5:
-        DCT_Upsample::P_Q<5, 5>::calc(P, Q, pSrc_ptr);
-        DCT_Upsample::R_S<5, 5>::calc(R, S, pSrc_ptr);
-        break;
case 5*16+6: - DCT_Upsample::P_Q<5, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 6>::calc(R, S, pSrc_ptr); - break; - case 6*16+6: - DCT_Upsample::P_Q<6, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<6, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+6: - DCT_Upsample::P_Q<7, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+7: - DCT_Upsample::P_Q<7, 7>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 7>::calc(R, S, pSrc_ptr); - break; - case 7*16+8: - DCT_Upsample::P_Q<7, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 8>::calc(R, S, pSrc_ptr); - break; - case 8*16+8: - DCT_Upsample::P_Q<8, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<8, 8>::calc(R, S, pSrc_ptr); - break; - default: - JPGD_ASSERT(false); - } - - DCT_Upsample::Matrix44 a(P + Q); P -= Q; - DCT_Upsample::Matrix44& b = P; - DCT_Upsample::Matrix44 c(R + S); R -= S; - DCT_Upsample::Matrix44& d = R; - - DCT_Upsample::Matrix44::add_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::add_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - pSrc_ptr += 64; - } - } - - // Loads and dequantizes the next row of (already decoded) coefficients. - // Progressive images only. - void jpeg_decoder::load_next_row() - { - int i; - jpgd_block_t *p; - jpgd_quant_t *q; - int mcu_row, mcu_block, row_block = 0; - int component_num, component_id; - int block_x_mcu[JPGD_MAX_COMPONENTS]; - - memset(block_x_mcu, 0, JPGD_MAX_COMPONENTS * sizeof(int)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - q = m_quant[m_comp_quant[component_id]]; - - p = m_pMCU_coefficients + 64 * mcu_block; - - jpgd_block_t* pAC = coeff_buf_getp(m_ac_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - jpgd_block_t* pDC = coeff_buf_getp(m_dc_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - p[0] = pDC[0]; - memcpy(&p[1], &pAC[1], 63 * sizeof(jpgd_block_t)); - - for (i = 63; i > 0; i--) - if (p[g_ZAG[i]]) - break; - - m_mcu_block_max_zag[mcu_block] = i + 1; - - for ( ; i >= 0; i--) - if (p[g_ZAG[i]]) - p[g_ZAG[i]] = static_cast(p[g_ZAG[i]] * q[i]); - - row_block++; - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - - // Restart interval processing. 
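-  // Called between MCUs when m_restarts_left hits zero: scans ahead a bounded distance
-  // for the expected RSTn marker, resets the DC predictions and the EOB run, and
-  // re-primes the bit buffer so decoding can resume at the restart boundary.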
- void jpeg_decoder::process_restart() - { - int i; - int c = 0; - - // Align to a byte boundry - // FIXME: Is this really necessary? get_bits_no_markers() never reads in markers! - //get_bits_no_markers(m_bits_left & 7); - - // Let's scan a little bit to find the marker, but not _too_ far. - // 1536 is a "fudge factor" that determines how much to scan. - for (i = 1536; i > 0; i--) - if (get_char() == 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - for ( ; i > 0; i--) - if ((c = get_char()) != 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Is it the expected marker? If not, something bad happened. - if (c != (m_next_restart_num + M_RST0)) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Reset each component's DC prediction values. - memset(&m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - m_restarts_left = m_restart_interval; - - m_next_restart_num = (m_next_restart_num + 1) & 7; - - // Get the bit buffer going again... - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - static inline int dequantize_ac(int c, int q) { c *= q; return c; } - - // Decodes and dequantizes the next row of coefficients. - void jpeg_decoder::decode_next_row() - { - int row_block = 0; - - for (int mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - jpgd_block_t* p = m_pMCU_coefficients; - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++, p += 64) - { - int component_id = m_mcu_org[mcu_block]; - jpgd_quant_t* q = m_quant[m_comp_quant[component_id]]; - - int r, s; - s = huff_decode(m_pHuff_tabs[m_comp_dc_tab[component_id]], r); - s = HUFF_EXTEND(r, s); - - m_last_dc_val[component_id] = (s += m_last_dc_val[component_id]); - - p[0] = static_cast(s * q[0]); - - int prev_num_set = m_mcu_block_max_zag[mcu_block]; - - huff_tables *pH = m_pHuff_tabs[m_comp_ac_tab[component_id]]; - - int k; - for (k = 1; k < 64; k++) - { - int extra_bits; - s = huff_decode(pH, extra_bits); - - r = s >> 4; - s &= 15; - - if (s) - { - if (r) - { - if ((k + r) > 63) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(r, prev_num_set - k); - int kt = k; - while (n--) - p[g_ZAG[kt++]] = 0; - } - - k += r; - } - - s = HUFF_EXTEND(extra_bits, s); - - JPGD_ASSERT(k < 64); - - p[g_ZAG[k]] = static_cast(dequantize_ac(s, q[k])); //s * q[k]; - } - else - { - if (r == 15) - { - if ((k + 16) > 64) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(16, prev_num_set - k); - int kt = k; - while (n--) - { - JPGD_ASSERT(kt <= 63); - p[g_ZAG[kt++]] = 0; - } - } - - k += 16 - 1; // - 1 because the loop counter is k - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64 && p[g_ZAG[k]] == 0); - // END EPIC MOD - } - else - break; - } - } - - if (k < prev_num_set) - { - int kt = k; - while (kt < prev_num_set) - p[g_ZAG[kt++]] = 0; - } - - m_mcu_block_max_zag[mcu_block] = k; - - row_block++; - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - - m_restarts_left--; - } - } - - // YCbCr H1V1 (1x1:1:1, 3 m_blocks per MCU) to RGB - void jpeg_decoder::H1V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int y = s[j]; - int cb = s[64+j]; - int cr = s[128+j]; - - if (jpg_format == 
ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - d += 4; - } - - s += 64*3; - } - } - - // YCbCr H2V1 (2x1:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H2V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *y = m_pSample_buf + row * 8; - uint8 *c = m_pSample_buf + 2*64 + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 4; j++) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j<<1]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - } - - d0 += 8; - - c++; - } - y += 64; - } - - y += 64*4 - 64*2; - c += 64*4 - 8; - } - } - - // YCbCr H2V1 (1x2:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H1V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*1 + (row & 7) * 8; - - c = m_pSample_buf + 64*2 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int cb = c[0+j]; - int cr = c[64+j]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - } - - d0 += 4; - d1 += 4; - } - - y += 64*4; - c += 64*4; - } - } - - // YCbCr H2V2 (2x2:1:1, 6 m_blocks per MCU) to RGB - void jpeg_decoder::H2V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*2 + (row & 7) * 8; - - c = m_pSample_buf + 64*4 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 8; j += 2) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+bc); - d1[5] = 
clamp(yy+gc); - d1[6] = clamp(yy+rc); - d1[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+rc); - d1[5] = clamp(yy+gc); - d1[6] = clamp(yy+bc); - d1[7] = 255; - } - - d0 += 8; - d1 += 8; - - c++; - } - y += 64; - } - - y += 64*6 - 64*2; - c += 64*6 - 8; - } - } - - // Y (1 block per MCU) to 8-bit grayscale - void jpeg_decoder::gray_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - *(uint *)d = *(uint *)s; - *(uint *)(&d[4]) = *(uint *)(&s[4]); - - s += 64; - d += 8; - } - } - - void jpeg_decoder::expanded_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - - uint8* Py = m_pSample_buf + (row / 8) * 64 * m_comp_h_samp[0] + (row & 7) * 8; - - uint8* d = m_pScan_line_0; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int k = 0; k < m_max_mcu_x_size; k += 8) - { - const int Y_ofs = k * 8; - const int Cb_ofs = Y_ofs + 64 * m_expanded_blocks_per_component; - const int Cr_ofs = Y_ofs + 64 * m_expanded_blocks_per_component * 2; - for (int j = 0; j < 8; j++) - { - int y = Py[Y_ofs + j]; - int cb = Py[Cb_ofs + j]; - int cr = Py[Cr_ofs + j]; - - if (jpg_format == ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - - d += 4; - } - } - - Py += 64 * m_expanded_blocks_per_mcu; - } - } - - // Find end of image (EOI) marker, so we can return to the user the exact size of the input stream. - void jpeg_decoder::find_eoi() - { - if (!m_progressive_flag) - { - // Attempt to read the EOI marker. - //get_bits_no_markers(m_bits_left & 7); - - // Prime the bit buffer - m_bits_left = 16; - get_bits(16); - get_bits(16); - - // The next marker _should_ be EOI - process_markers(); - } - - m_total_bytes_read -= m_in_buf_left; - } - - int jpeg_decoder::decode(const void** pScan_line, uint* pScan_line_len) - { - if ((m_error_code) || (!m_ready_flag)) - return JPGD_FAILED; - - if (m_total_lines_left == 0) - return JPGD_DONE; - - if (m_mcu_lines_left == 0) - { - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - if (m_progressive_flag) - load_next_row(); - else - decode_next_row(); - - // Find the EOI marker if that was the last row. 
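-      // (find_eoi() also adjusts m_total_bytes_read so the caller can learn the exact size of the compressed stream.)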
- if (m_total_lines_left <= m_max_mcu_y_size) - find_eoi(); - - m_mcu_lines_left = m_max_mcu_y_size; - } - - if (m_freq_domain_chroma_upsample) - { - expanded_convert(); - *pScan_line = m_pScan_line_0; - } - else - { - switch (m_scan_type) - { - case JPGD_YH2V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H2V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH2V1: - { - H2V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_YH1V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H1V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH1V1: - { - H1V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_GRAYSCALE: - { - gray_convert(); - *pScan_line = m_pScan_line_0; - - break; - } - } - } - - *pScan_line_len = m_real_dest_bytes_per_scan_line; - - m_mcu_lines_left--; - m_total_lines_left--; - - return JPGD_SUCCESS; - } - - // Creates the tables needed for efficient Huffman decoding. - void jpeg_decoder::make_huff_table(int index, huff_tables *pH) - { - int p, i, l, si; - uint8 huffsize[257]; - uint huffcode[257]; - uint code; - uint subtree; - int code_size; - int lastp; - int nextfreeentry; - int currententry; - - pH->ac_table = m_huff_ac[index] != 0; - - p = 0; - - for (l = 1; l <= 16; l++) - { - for (i = 1; i <= m_huff_num[index][l]; i++) - huffsize[p++] = static_cast(l); - } - - huffsize[p] = 0; - - lastp = p; - - code = 0; - si = huffsize[0]; - p = 0; - - while (huffsize[p]) - { - while (huffsize[p] == si) - { - huffcode[p++] = code; - code++; - } - - code <<= 1; - si++; - } - - memset(pH->look_up, 0, sizeof(pH->look_up)); - memset(pH->look_up2, 0, sizeof(pH->look_up2)); - memset(pH->tree, 0, sizeof(pH->tree)); - memset(pH->code_size, 0, sizeof(pH->code_size)); - - nextfreeentry = -1; - - p = 0; - - while (p < lastp) - { - i = m_huff_val[index][p]; - code = huffcode[p]; - code_size = huffsize[p]; - - pH->code_size[i] = static_cast(code_size); - - if (code_size <= 8) - { - code <<= (8 - code_size); - - for (l = 1 << (8 - code_size); l > 0; l--) - { - JPGD_ASSERT(i < 256); - - pH->look_up[code] = i; - - bool has_extrabits = false; - int extra_bits = 0; - int num_extra_bits = i & 15; - - int bits_to_fetch = code_size; - if (num_extra_bits) - { - int total_codesize = code_size + num_extra_bits; - if (total_codesize <= 8) - { - has_extrabits = true; - extra_bits = ((1 << num_extra_bits) - 1) & (code >> (8 - total_codesize)); - JPGD_ASSERT(extra_bits <= 0x7FFF); - bits_to_fetch += num_extra_bits; - } - } - - if (!has_extrabits) - pH->look_up2[code] = i | (bits_to_fetch << 8); - else - pH->look_up2[code] = i | 0x8000 | (extra_bits << 16) | (bits_to_fetch << 8); - - code++; - } - } - else - { - subtree = (code >> (code_size - 8)) & 0xFF; - - currententry = pH->look_up[subtree]; - - if (currententry == 0) - { - pH->look_up[subtree] = currententry = nextfreeentry; - pH->look_up2[subtree] = currententry = nextfreeentry; - - nextfreeentry -= 2; - } - - code <<= (16 - (code_size - 8)); - - for (l = code_size; l > 9; l--) - { - if ((code & 0x8000) == 0) - currententry--; - - if (pH->tree[-currententry - 1] == 0) - { - pH->tree[-currententry - 1] = nextfreeentry; - - currententry = nextfreeentry; - - nextfreeentry -= 2; - } - else - currententry = pH->tree[-currententry - 1]; - - code <<= 1; - } - - if ((code & 0x8000) == 0) - currententry--; - - pH->tree[-currententry - 1] = i; - } - - p++; - } - } - - // Verifies the quantization tables needed for 
this scan are available. - void jpeg_decoder::check_quant_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - if (m_quant[m_comp_quant[m_comp_list[i]]] == NULL) - stop_decoding(JPGD_UNDEFINED_QUANT_TABLE); - } - - // Verifies that all the Huffman tables needed for this scan are available. - void jpeg_decoder::check_huff_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - { - if ((m_spectral_start == 0) && (m_huff_num[m_comp_dc_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - - if ((m_spectral_end > 0) && (m_huff_num[m_comp_ac_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - } - - for (int i = 0; i < JPGD_MAX_HUFF_TABLES; i++) - if (m_huff_num[i]) - { - if (!m_pHuff_tabs[i]) - m_pHuff_tabs[i] = (huff_tables *)alloc(sizeof(huff_tables)); - - make_huff_table(i, m_pHuff_tabs[i]); - } - } - - // Determines the component order inside each MCU. - // Also calcs how many MCU's are on each row, etc. - void jpeg_decoder::calc_mcu_block_order() - { - int component_num, component_id; - int max_h_samp = 0, max_v_samp = 0; - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - if (m_comp_h_samp[component_id] > max_h_samp) - max_h_samp = m_comp_h_samp[component_id]; - - if (m_comp_v_samp[component_id] > max_v_samp) - max_v_samp = m_comp_v_samp[component_id]; - } - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - m_comp_h_blocks[component_id] = ((((m_image_x_size * m_comp_h_samp[component_id]) + (max_h_samp - 1)) / max_h_samp) + 7) / 8; - m_comp_v_blocks[component_id] = ((((m_image_y_size * m_comp_v_samp[component_id]) + (max_v_samp - 1)) / max_v_samp) + 7) / 8; - } - - if (m_comps_in_scan == 1) - { - m_mcus_per_row = m_comp_h_blocks[m_comp_list[0]]; - m_mcus_per_col = m_comp_v_blocks[m_comp_list[0]]; - } - else - { - m_mcus_per_row = (((m_image_x_size + 7) / 8) + (max_h_samp - 1)) / max_h_samp; - m_mcus_per_col = (((m_image_y_size + 7) / 8) + (max_v_samp - 1)) / max_v_samp; - } - - if (m_comps_in_scan == 1) - { - m_mcu_org[0] = m_comp_list[0]; - - m_blocks_per_mcu = 1; - } - else - { - m_blocks_per_mcu = 0; - - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - int num_blocks; - - component_id = m_comp_list[component_num]; - - num_blocks = m_comp_h_samp[component_id] * m_comp_v_samp[component_id]; - - while (num_blocks--) - m_mcu_org[m_blocks_per_mcu++] = component_id; - } - } - } - - // Starts a new scan. - int jpeg_decoder::init_scan() - { - if (!locate_sos_marker()) - return JPGD_FALSE; - - calc_mcu_block_order(); - - check_huff_tables(); - - check_quant_tables(); - - memset(m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - if (m_restart_interval) - { - m_restarts_left = m_restart_interval; - m_next_restart_num = 0; - } - - fix_in_buffer(); - - return JPGD_TRUE; - } - - // Starts a frame. Determines if the number of components or sampling factors - // are supported. 
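As context for the routine that follows: for three-component images the supported sampling-factor combinations reduce to a small lookup from the luma factors to a scan type, blocks per MCU, and MCU pixel size (chroma must be 1x1). A minimal Python sketch of that mapping, illustrative only and not part of jpgd:

```python
# Illustrative sketch (not part of jpgd): how the luma sampling factors map to
# the scan type and MCU geometry that init_frame() selects for 3-component images.
# Chroma components are assumed to be 1x1, as the decoder requires.
def mcu_geometry(luma_h: int, luma_v: int):
    table = {
        (1, 1): ("YH1V1", 3, 8, 8),    # 1 Y block + Cb + Cr per MCU
        (2, 1): ("YH2V1", 4, 16, 8),   # 2 Y blocks + Cb + Cr
        (1, 2): ("YH1V2", 4, 8, 16),   # 2 Y blocks + Cb + Cr
        (2, 2): ("YH2V2", 6, 16, 16),  # 4 Y blocks + Cb + Cr (4:2:0)
    }
    if (luma_h, luma_v) not in table:
        raise ValueError("unsupported sampling factors")
    return table[(luma_h, luma_v)]

# e.g. a 4:2:0 JPEG (2x2 luma sampling) decodes 6 blocks per 16x16-pixel MCU:
assert mcu_geometry(2, 2) == ("YH2V2", 6, 16, 16)
```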
- void jpeg_decoder::init_frame() - { - int i; - - if (m_comps_in_frame == 1) - { - if ((m_comp_h_samp[0] != 1) || (m_comp_v_samp[0] != 1)) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - m_scan_type = JPGD_GRAYSCALE; - m_max_blocks_per_mcu = 1; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if (m_comps_in_frame == 3) - { - if ( ((m_comp_h_samp[1] != 1) || (m_comp_v_samp[1] != 1)) || - ((m_comp_h_samp[2] != 1) || (m_comp_v_samp[2] != 1)) ) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH1V1; - - m_max_blocks_per_mcu = 3; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH2V1; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH1V2; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 16; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH2V2; - m_max_blocks_per_mcu = 6; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 16; - } - else - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - } - else - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - m_max_mcus_per_row = (m_image_x_size + (m_max_mcu_x_size - 1)) / m_max_mcu_x_size; - m_max_mcus_per_col = (m_image_y_size + (m_max_mcu_y_size - 1)) / m_max_mcu_y_size; - - // These values are for the *destination* pixels: after conversion. - if (m_scan_type == JPGD_GRAYSCALE) - m_dest_bytes_per_pixel = 1; - else - m_dest_bytes_per_pixel = 4; - - m_dest_bytes_per_scan_line = ((m_image_x_size + 15) & 0xFFF0) * m_dest_bytes_per_pixel; - - m_real_dest_bytes_per_scan_line = (m_image_x_size * m_dest_bytes_per_pixel); - - // Initialize two scan line buffers. - m_pScan_line_0 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - if ((m_scan_type == JPGD_YH1V2) || (m_scan_type == JPGD_YH2V2)) - m_pScan_line_1 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - - m_max_blocks_per_row = m_max_mcus_per_row * m_max_blocks_per_mcu; - - // Should never happen - if (m_max_blocks_per_row > JPGD_MAX_BLOCKS_PER_ROW) - stop_decoding(JPGD_ASSERTION_ERROR); - - // Allocate the coefficient buffer, enough for one MCU - m_pMCU_coefficients = (jpgd_block_t*)alloc(m_max_blocks_per_mcu * 64 * sizeof(jpgd_block_t)); - - for (i = 0; i < m_max_blocks_per_mcu; i++) - m_mcu_block_max_zag[i] = 64; - - m_expanded_blocks_per_component = m_comp_h_samp[0] * m_comp_v_samp[0]; - m_expanded_blocks_per_mcu = m_expanded_blocks_per_component * m_comps_in_frame; - m_expanded_blocks_per_row = m_max_mcus_per_row * m_expanded_blocks_per_mcu; - // Freq. domain chroma upsampling is only supported for H2V2 subsampling factor. -// BEGIN EPIC MOD -#if JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING - m_freq_domain_chroma_upsample = (m_expanded_blocks_per_mcu == 4*3); -#else - m_freq_domain_chroma_upsample = 0; -#endif -// END EPIC MOD - - if (m_freq_domain_chroma_upsample) - m_pSample_buf = (uint8 *)alloc(m_expanded_blocks_per_row * 64); - else - m_pSample_buf = (uint8 *)alloc(m_max_blocks_per_row * 64); - - m_total_lines_left = m_image_y_size; - - m_mcu_lines_left = 0; - - create_look_ups(); - } - - // The coeff_buf series of methods originally stored the coefficients - // into a "virtual" file which was located in EMS, XMS, or a disk file. A cache - // was used to make this process more efficient. Now, we can store the entire - // thing in RAM. 
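Conceptually, each coeff_buf is one flat allocation holding block_num_x * block_num_y coefficient blocks, addressed row-major by block coordinates. A rough Python analogue of the open/getp pair defined next, illustrative only and assuming jpgd_block_t is a 16-bit integer:

```python
# Rough analogue (not part of jpgd) of coeff_buf_open()/coeff_buf_getp():
# one flat buffer of block_num_x * block_num_y blocks, each holding
# block_len_x * block_len_y coefficients, laid out row by row.
import numpy as np

class CoeffBuf:
    def __init__(self, block_num_x, block_num_y, block_len_x, block_len_y):
        self.block_num_x = block_num_x
        self.block_num_y = block_num_y
        self.block_len = block_len_x * block_len_y
        # int16 assumed to match jpgd_block_t
        self.data = np.zeros(block_num_x * block_num_y * self.block_len, dtype=np.int16)

    def getp(self, block_x, block_y):
        # Same addressing as coeff_buf_getp: offset = (y * num_x + x) * block_size.
        assert block_x < self.block_num_x and block_y < self.block_num_y
        start = (block_y * self.block_num_x + block_x) * self.block_len
        return self.data[start:start + self.block_len]
```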
- jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y) - { - coeff_buf* cb = (coeff_buf*)alloc(sizeof(coeff_buf)); - - cb->block_num_x = block_num_x; - cb->block_num_y = block_num_y; - cb->block_len_x = block_len_x; - cb->block_len_y = block_len_y; - cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t); - cb->pData = (uint8 *)alloc(cb->block_size * block_num_x * block_num_y, true); - return cb; - } - - inline jpgd_block_t *jpeg_decoder::coeff_buf_getp(coeff_buf *cb, int block_x, int block_y) - { - JPGD_ASSERT((block_x < cb->block_num_x) && (block_y < cb->block_num_y)); - return (jpgd_block_t *)(cb->pData + block_x * cb->block_size + block_y * (cb->block_size * cb->block_num_x)); - } - - // The following methods decode the various types of m_blocks encountered - // in progressively encoded images. - void jpeg_decoder::decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, r; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - if ((s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_dc_tab[component_id]])) != 0) - { - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - } - - pD->m_last_dc_val[component_id] = (s += pD->m_last_dc_val[component_id]); - - p[0] = static_cast<jpgd_block_t>(s << pD->m_successive_low); - } - - void jpeg_decoder::decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - if (pD->get_bits_no_markers(1)) - { - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - p[0] |= (1 << pD->m_successive_low); - } - } - - void jpeg_decoder::decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int k, s, r; - - if (pD->m_eob_run) - { - pD->m_eob_run--; - return; - } - - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - for (k = pD->m_spectral_start; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if ((k += r) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - - p[g_ZAG[k]] = static_cast<jpgd_block_t>(s << pD->m_successive_low); - } - else - { - if (r == 15) - { - if ((k += 15) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - } - else - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - pD->m_eob_run--; - - break; - } - } - } - } - - void jpeg_decoder::decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, k, r; - int p1 = 1 << pD->m_successive_low; - int m1 = (-1) << pD->m_successive_low; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - k = pD->m_spectral_start; - - if (pD->m_eob_run == 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if (s != 1) - pD->stop_decoding(JPGD_DECODE_ERROR); - - if (pD->get_bits_no_markers(1)) - s = p1; - else - s = m1; - } - else - { - if (r != 15) - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - break; - } - } - - do - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if 
(*this_coef >= 0) - *this_coef = static_cast<jpgd_block_t>(*this_coef + p1); - else - *this_coef = static_cast<jpgd_block_t>(*this_coef + m1); - } - } - } - else - { - if (--r < 0) - break; - } - - k++; - - } while (k <= pD->m_spectral_end); - - if ((s) && (k < 64)) - { - p[g_ZAG[k]] = static_cast<jpgd_block_t>(s); - } - } - } - - if (pD->m_eob_run > 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if (*this_coef >= 0) - *this_coef = static_cast<jpgd_block_t>(*this_coef + p1); - else - *this_coef = static_cast<jpgd_block_t>(*this_coef + m1); - } - } - } - } - - pD->m_eob_run--; - } - } - - // Decode a scan in a progressively encoded image. - void jpeg_decoder::decode_scan(pDecode_block_func decode_block_func) - { - int mcu_row, mcu_col, mcu_block; - int block_x_mcu[JPGD_MAX_COMPONENTS], m_block_y_mcu[JPGD_MAX_COMPONENTS]; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - for (mcu_col = 0; mcu_col < m_mcus_per_col; mcu_col++) - { - int component_num, component_id; - - memset(block_x_mcu, 0, sizeof(block_x_mcu)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - - decode_block_func(this, component_id, block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - m_restarts_left--; - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - } - - // Decode a progressively encoded image. - void jpeg_decoder::init_progressive() - { - int i; - - if (m_comps_in_frame == 4) - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - // Allocate the coefficient buffers. 
- for (i = 0; i < m_comps_in_frame; i++) - { - m_dc_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 1, 1); - m_ac_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 8, 8); - } - - for ( ; ; ) - { - int dc_only_scan, refinement_scan; - pDecode_block_func decode_block_func; - - if (!init_scan()) - break; - - dc_only_scan = (m_spectral_start == 0); - refinement_scan = (m_successive_high != 0); - - if ((m_spectral_start > m_spectral_end) || (m_spectral_end > 63)) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if (dc_only_scan) - { - if (m_spectral_end) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - } - else if (m_comps_in_scan != 1) /* AC scans can only contain one component */ - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if ((refinement_scan) && (m_successive_low != m_successive_high - 1)) - stop_decoding(JPGD_BAD_SOS_SUCCESSIVE); - - if (dc_only_scan) - { - if (refinement_scan) - decode_block_func = decode_block_dc_refine; - else - decode_block_func = decode_block_dc_first; - } - else - { - if (refinement_scan) - decode_block_func = decode_block_ac_refine; - else - decode_block_func = decode_block_ac_first; - } - - decode_scan(decode_block_func); - - m_bits_left = 16; - get_bits(16); - get_bits(16); - } - - m_comps_in_scan = m_comps_in_frame; - - for (i = 0; i < m_comps_in_frame; i++) - m_comp_list[i] = i; - - calc_mcu_block_order(); - } - - void jpeg_decoder::init_sequential() - { - if (!init_scan()) - stop_decoding(JPGD_UNEXPECTED_MARKER); - } - - void jpeg_decoder::decode_start() - { - init_frame(); - - if (m_progressive_flag) - init_progressive(); - else - init_sequential(); - } - - void jpeg_decoder::decode_init(jpeg_decoder_stream *pStream) - { - init(pStream); - locate_sof_marker(); - } - - jpeg_decoder::jpeg_decoder(jpeg_decoder_stream *pStream) - { - if (setjmp(m_jmp_state)) - return; - decode_init(pStream); - } - - int jpeg_decoder::begin_decoding() - { - if (m_ready_flag) - return JPGD_SUCCESS; - - if (m_error_code) - return JPGD_FAILED; - - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - decode_start(); - - m_ready_flag = true; - - return JPGD_SUCCESS; - } - - jpeg_decoder::~jpeg_decoder() - { - free_all_blocks(); - } - - jpeg_decoder_file_stream::jpeg_decoder_file_stream() - { - m_pFile = NULL; - m_eof_flag = false; - m_error_flag = false; - } - - void jpeg_decoder_file_stream::close() - { - if (m_pFile) - { - fclose(m_pFile); - m_pFile = NULL; - } - - m_eof_flag = false; - m_error_flag = false; - } - - jpeg_decoder_file_stream::~jpeg_decoder_file_stream() - { - close(); - } - - bool jpeg_decoder_file_stream::open(const char *Pfilename) - { - close(); - - m_eof_flag = false; - m_error_flag = false; - -#if defined(_MSC_VER) - m_pFile = NULL; - fopen_s(&m_pFile, Pfilename, "rb"); -#else - m_pFile = fopen(Pfilename, "rb"); -#endif - return m_pFile != NULL; - } - - int jpeg_decoder_file_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - if (!m_pFile) - return -1; - - if (m_eof_flag) - { - *pEOF_flag = true; - return 0; - } - - if (m_error_flag) - return -1; - - int bytes_read = static_cast<int>(fread(pBuf, 1, max_bytes_to_read, m_pFile)); - if (bytes_read < max_bytes_to_read) - { - if (ferror(m_pFile)) - { - m_error_flag = true; - return -1; - } - - m_eof_flag = true; - *pEOF_flag = true; - } - - return bytes_read; - } - - bool jpeg_decoder_mem_stream::open(const uint8 *pSrc_data, uint size) - { - close(); - m_pSrc_data = pSrc_data; - m_ofs = 0; - m_size = size; - 
return true; - } - - int jpeg_decoder_mem_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - *pEOF_flag = false; - - if (!m_pSrc_data) - return -1; - - uint bytes_remaining = m_size - m_ofs; - if ((uint)max_bytes_to_read > bytes_remaining) - { - max_bytes_to_read = bytes_remaining; - *pEOF_flag = true; - } - - memcpy(pBuf, m_pSrc_data + m_ofs, max_bytes_to_read); - m_ofs += max_bytes_to_read; - - return max_bytes_to_read; - } - - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps) - { - if (!actual_comps) - return NULL; - *actual_comps = 0; - - if ((!pStream) || (!width) || (!height) || (!req_comps)) - return NULL; - - if ((req_comps != 1) && (req_comps != 3) && (req_comps != 4)) - return NULL; - - jpeg_decoder decoder(pStream); - if (decoder.get_error_code() != JPGD_SUCCESS) - return NULL; - - const int image_width = decoder.get_width(), image_height = decoder.get_height(); - *width = image_width; - *height = image_height; - *actual_comps = decoder.get_num_components(); - - if (decoder.begin_decoding() != JPGD_SUCCESS) - return NULL; - - const int dst_bpl = image_width * req_comps; - - uint8 *pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height); - if (!pImage_data) - return NULL; - - for (int y = 0; y < image_height; y++) - { - const uint8* pScan_line = 0; - uint scan_line_len; - if (decoder.decode((const void**)&pScan_line, &scan_line_len) != JPGD_SUCCESS) - { - jpgd_free(pImage_data); - return NULL; - } - - uint8 *pDst = pImage_data + y * dst_bpl; - - if (((req_comps == 4) && (decoder.get_num_components() == 3)) || - ((req_comps == 1) && (decoder.get_num_components() == 1))) - { - memcpy(pDst, pScan_line, dst_bpl); - } - else if (decoder.get_num_components() == 1) - { - if (req_comps == 3) - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst += 3; - } - } - else - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst[3] = 255; - pDst += 4; - } - } - } - else if (decoder.get_num_components() == 3) - { - if (req_comps == 1) - { - const int YR = 19595, YG = 38470, YB = 7471; - for (int x = 0; x < image_width; x++) - { - int r = pScan_line[x*4+0]; - int g = pScan_line[x*4+1]; - int b = pScan_line[x*4+2]; - *pDst++ = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16); - } - } - else - { - for (int x = 0; x < image_width; x++) - { - pDst[0] = pScan_line[x*4+0]; - pDst[1] = pScan_line[x*4+1]; - pDst[2] = pScan_line[x*4+2]; - pDst += 3; - } - } - } - } - - return pImage_data; - } - -// BEGIN EPIC MOD - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format) - { - jpg_format = (ERGBFormatJPG)format; -// END EPIC MOD - jpgd::jpeg_decoder_mem_stream mem_stream(pSrc_data, src_data_size); - return decompress_jpeg_image_from_stream(&mem_stream, width, height, actual_comps, req_comps); - } - - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps) - { - jpgd::jpeg_decoder_file_stream file_stream; - if (!file_stream.open(pSrc_filename)) - return NULL; - return decompress_jpeg_image_from_stream(&file_stream, width, height, actual_comps, req_comps); - } - -} // namespace jpgd diff --git 
a/spaces/chansung/LLM-As-Chatbot/models/koalpaca.py b/spaces/chansung/LLM-As-Chatbot/models/koalpaca.py deleted file mode 100644 index 1afde3ba20e9e2d97f17c87dfab263cb073f76d6..0000000000000000000000000000000000000000 --- a/spaces/chansung/LLM-As-Chatbot/models/koalpaca.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer -from optimum.bettertransformer import BetterTransformer - -def load_model( - base, - finetuned, - mode_cpu, - mode_mps, - mode_full_gpu, - mode_8bit, - mode_4bit, - force_download_ckpt -): - tokenizer = AutoTokenizer.from_pretrained(base) - - if mode_cpu: - print("cpu mode") - model = AutoModelForCausalLM.from_pretrained( - base, - device_map={"": "cpu"}, - use_safetensors=False - ) - - elif mode_mps: - print("mps mode") - model = AutoModelForCausalLM.from_pretrained( - base, - device_map={"": "mps"}, - torch_dtype=torch.float16, - use_safetensors=False - ) - - else: - print("gpu mode") - print(f"8bit = {mode_8bit}, 4bit = {mode_4bit}") - model = AutoModelForCausalLM.from_pretrained( - base, - load_in_8bit=mode_8bit, - load_in_4bit=mode_4bit, - torch_dtype=torch.float16, - device_map="auto", - use_safetensors=False - ) - - if not mode_8bit and not mode_4bit: - model.half() - - # model = BetterTransformer.transform(model) - return model, tokenizer \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/token-classification/run_no_trainer.sh b/spaces/chendl/compositional_test/transformers/examples/pytorch/token-classification/run_no_trainer.sh deleted file mode 100644 index bf9cbb7223cbbbb4cbab71a9c32e4170512c3c89..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/token-classification/run_no_trainer.sh +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
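The accelerate invocation below can also be driven from Python; a hypothetical subprocess wrapper over the same command (the flags mirror the shell script that follows) might look like:

```python
# Hypothetical convenience wrapper, not part of the original example;
# it simply shells out to the same accelerate command.
import subprocess

def launch_ner_no_trainer(output_dir: str = "/tmp/test-ner") -> None:
    cmd = [
        "accelerate", "launch", "run_ner_no_trainer.py",
        "--model_name_or_path", "bert-base-uncased",
        "--dataset_name", "conll2003",
        "--output_dir", output_dir,
        "--pad_to_max_length",
        "--task_name", "ner",
        "--return_entity_level_metrics",
    ]
    subprocess.run(cmd, check=True)

if __name__ == "__main__":
    launch_ner_no_trainer()
```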
- -accelerate launch run_ner_no_trainer.py \ - --model_name_or_path bert-base-uncased \ - --dataset_name conll2003 \ - --output_dir /tmp/test-ner \ - --pad_to_max_length \ - --task_name ner \ - --return_entity_level_metrics diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/aiohttp/connector.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/aiohttp/connector.py deleted file mode 100644 index bf40689d81b53cd34550e9d8949767385ecd916d..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/aiohttp/connector.py +++ /dev/null @@ -1,1453 +0,0 @@ -import asyncio -import functools -import random -import sys -import traceback -import warnings -from collections import defaultdict, deque -from contextlib import suppress -from http.cookies import SimpleCookie -from itertools import cycle, islice -from time import monotonic -from types import TracebackType -from typing import ( - TYPE_CHECKING, - Any, - Awaitable, - Callable, - DefaultDict, - Dict, - Iterator, - List, - Optional, - Set, - Tuple, - Type, - Union, - cast, -) - -import attr - -from . import hdrs, helpers -from .abc import AbstractResolver -from .client_exceptions import ( - ClientConnectionError, - ClientConnectorCertificateError, - ClientConnectorError, - ClientConnectorSSLError, - ClientHttpProxyError, - ClientProxyConnectionError, - ServerFingerprintMismatch, - UnixClientConnectorError, - cert_errors, - ssl_errors, -) -from .client_proto import ResponseHandler -from .client_reqrep import ClientRequest, Fingerprint, _merge_ssl_params -from .helpers import ( - PY_36, - ceil_timeout, - get_running_loop, - is_ip_address, - noop, - sentinel, -) -from .http import RESPONSES -from .locks import EventResultOrError -from .resolver import DefaultResolver - -try: - import ssl - - SSLContext = ssl.SSLContext -except ImportError: # pragma: no cover - ssl = None # type: ignore[assignment] - SSLContext = object # type: ignore[misc,assignment] - - -__all__ = ("BaseConnector", "TCPConnector", "UnixConnector", "NamedPipeConnector") - - -if TYPE_CHECKING: # pragma: no cover - from .client import ClientTimeout - from .client_reqrep import ConnectionKey - from .tracing import Trace - - -class _DeprecationWaiter: - __slots__ = ("_awaitable", "_awaited") - - def __init__(self, awaitable: Awaitable[Any]) -> None: - self._awaitable = awaitable - self._awaited = False - - def __await__(self) -> Any: - self._awaited = True - return self._awaitable.__await__() - - def __del__(self) -> None: - if not self._awaited: - warnings.warn( - "Connector.close() is a coroutine, " - "please use await connector.close()", - DeprecationWarning, - ) - - -class Connection: - - _source_traceback = None - _transport = None - - def __init__( - self, - connector: "BaseConnector", - key: "ConnectionKey", - protocol: ResponseHandler, - loop: asyncio.AbstractEventLoop, - ) -> None: - self._key = key - self._connector = connector - self._loop = loop - self._protocol: Optional[ResponseHandler] = protocol - self._callbacks: List[Callable[[], None]] = [] - - if loop.get_debug(): - self._source_traceback = traceback.extract_stack(sys._getframe(1)) - - def __repr__(self) -> str: - return f"Connection<{self._key}>" - - def __del__(self, _warnings: Any = warnings) -> None: - if self._protocol is not None: - if PY_36: - kwargs = {"source": self} - else: - kwargs = {} - _warnings.warn(f"Unclosed connection {self!r}", ResourceWarning, **kwargs) - if 
self._loop.is_closed(): - return - - self._connector._release(self._key, self._protocol, should_close=True) - - context = {"client_connection": self, "message": "Unclosed connection"} - if self._source_traceback is not None: - context["source_traceback"] = self._source_traceback - self._loop.call_exception_handler(context) - - @property - def loop(self) -> asyncio.AbstractEventLoop: - warnings.warn( - "connector.loop property is deprecated", DeprecationWarning, stacklevel=2 - ) - return self._loop - - @property - def transport(self) -> Optional[asyncio.Transport]: - if self._protocol is None: - return None - return self._protocol.transport - - @property - def protocol(self) -> Optional[ResponseHandler]: - return self._protocol - - def add_callback(self, callback: Callable[[], None]) -> None: - if callback is not None: - self._callbacks.append(callback) - - def _notify_release(self) -> None: - callbacks, self._callbacks = self._callbacks[:], [] - - for cb in callbacks: - with suppress(Exception): - cb() - - def close(self) -> None: - self._notify_release() - - if self._protocol is not None: - self._connector._release(self._key, self._protocol, should_close=True) - self._protocol = None - - def release(self) -> None: - self._notify_release() - - if self._protocol is not None: - self._connector._release( - self._key, self._protocol, should_close=self._protocol.should_close - ) - self._protocol = None - - @property - def closed(self) -> bool: - return self._protocol is None or not self._protocol.is_connected() - - -class _TransportPlaceholder: - """placeholder for BaseConnector.connect function""" - - def close(self) -> None: - pass - - -class BaseConnector: - """Base connector class. - - keepalive_timeout - (optional) Keep-alive timeout. - force_close - Set to True to force close and do reconnect - after each request (and between redirects). - limit - The total number of simultaneous connections. - limit_per_host - Number of simultaneous connections to one host. - enable_cleanup_closed - Enables clean-up closed ssl transports. - Disabled by default. - loop - Optional event loop. 
- """ - - _closed = True # prevent AttributeError in __del__ if ctor was failed - _source_traceback = None - - # abort transport after 2 seconds (cleanup broken connections) - _cleanup_closed_period = 2.0 - - def __init__( - self, - *, - keepalive_timeout: Union[object, None, float] = sentinel, - force_close: bool = False, - limit: int = 100, - limit_per_host: int = 0, - enable_cleanup_closed: bool = False, - loop: Optional[asyncio.AbstractEventLoop] = None, - ) -> None: - - if force_close: - if keepalive_timeout is not None and keepalive_timeout is not sentinel: - raise ValueError( - "keepalive_timeout cannot " "be set if force_close is True" - ) - else: - if keepalive_timeout is sentinel: - keepalive_timeout = 15.0 - - loop = get_running_loop(loop) - - self._closed = False - if loop.get_debug(): - self._source_traceback = traceback.extract_stack(sys._getframe(1)) - - self._conns: Dict[ConnectionKey, List[Tuple[ResponseHandler, float]]] = {} - self._limit = limit - self._limit_per_host = limit_per_host - self._acquired: Set[ResponseHandler] = set() - self._acquired_per_host: DefaultDict[ - ConnectionKey, Set[ResponseHandler] - ] = defaultdict(set) - self._keepalive_timeout = cast(float, keepalive_timeout) - self._force_close = force_close - - # {host_key: FIFO list of waiters} - self._waiters = defaultdict(deque) # type: ignore[var-annotated] - - self._loop = loop - self._factory = functools.partial(ResponseHandler, loop=loop) - - self.cookies: SimpleCookie[str] = SimpleCookie() - - # start keep-alive connection cleanup task - self._cleanup_handle: Optional[asyncio.TimerHandle] = None - - # start cleanup closed transports task - self._cleanup_closed_handle: Optional[asyncio.TimerHandle] = None - self._cleanup_closed_disabled = not enable_cleanup_closed - self._cleanup_closed_transports: List[Optional[asyncio.Transport]] = [] - self._cleanup_closed() - - def __del__(self, _warnings: Any = warnings) -> None: - if self._closed: - return - if not self._conns: - return - - conns = [repr(c) for c in self._conns.values()] - - self._close() - - if PY_36: - kwargs = {"source": self} - else: - kwargs = {} - _warnings.warn(f"Unclosed connector {self!r}", ResourceWarning, **kwargs) - context = { - "connector": self, - "connections": conns, - "message": "Unclosed connector", - } - if self._source_traceback is not None: - context["source_traceback"] = self._source_traceback - self._loop.call_exception_handler(context) - - def __enter__(self) -> "BaseConnector": - warnings.warn( - '"with Connector():" is deprecated, ' - 'use "async with Connector():" instead', - DeprecationWarning, - ) - return self - - def __exit__(self, *exc: Any) -> None: - self._close() - - async def __aenter__(self) -> "BaseConnector": - return self - - async def __aexit__( - self, - exc_type: Optional[Type[BaseException]] = None, - exc_value: Optional[BaseException] = None, - exc_traceback: Optional[TracebackType] = None, - ) -> None: - await self.close() - - @property - def force_close(self) -> bool: - """Ultimately close connection on releasing if True.""" - return self._force_close - - @property - def limit(self) -> int: - """The total number for simultaneous connections. - - If limit is 0 the connector has no limit. - The default limit size is 100. - """ - return self._limit - - @property - def limit_per_host(self) -> int: - """The limit for simultaneous connections to the same endpoint. - - Endpoints are the same if they are have equal - (host, port, is_ssl) triple. 
- """ - return self._limit_per_host - - def _cleanup(self) -> None: - """Cleanup unused transports.""" - if self._cleanup_handle: - self._cleanup_handle.cancel() - # _cleanup_handle should be unset, otherwise _release() will not - # recreate it ever! - self._cleanup_handle = None - - now = self._loop.time() - timeout = self._keepalive_timeout - - if self._conns: - connections = {} - deadline = now - timeout - for key, conns in self._conns.items(): - alive = [] - for proto, use_time in conns: - if proto.is_connected(): - if use_time - deadline < 0: - transport = proto.transport - proto.close() - if key.is_ssl and not self._cleanup_closed_disabled: - self._cleanup_closed_transports.append(transport) - else: - alive.append((proto, use_time)) - else: - transport = proto.transport - proto.close() - if key.is_ssl and not self._cleanup_closed_disabled: - self._cleanup_closed_transports.append(transport) - - if alive: - connections[key] = alive - - self._conns = connections - - if self._conns: - self._cleanup_handle = helpers.weakref_handle( - self, "_cleanup", timeout, self._loop - ) - - def _drop_acquired_per_host( - self, key: "ConnectionKey", val: ResponseHandler - ) -> None: - acquired_per_host = self._acquired_per_host - if key not in acquired_per_host: - return - conns = acquired_per_host[key] - conns.remove(val) - if not conns: - del self._acquired_per_host[key] - - def _cleanup_closed(self) -> None: - """Double confirmation for transport close. - - Some broken ssl servers may leave socket open without proper close. - """ - if self._cleanup_closed_handle: - self._cleanup_closed_handle.cancel() - - for transport in self._cleanup_closed_transports: - if transport is not None: - transport.abort() - - self._cleanup_closed_transports = [] - - if not self._cleanup_closed_disabled: - self._cleanup_closed_handle = helpers.weakref_handle( - self, "_cleanup_closed", self._cleanup_closed_period, self._loop - ) - - def close(self) -> Awaitable[None]: - """Close all opened transports.""" - self._close() - return _DeprecationWaiter(noop()) - - def _close(self) -> None: - if self._closed: - return - - self._closed = True - - try: - if self._loop.is_closed(): - return - - # cancel cleanup task - if self._cleanup_handle: - self._cleanup_handle.cancel() - - # cancel cleanup close task - if self._cleanup_closed_handle: - self._cleanup_closed_handle.cancel() - - for data in self._conns.values(): - for proto, t0 in data: - proto.close() - - for proto in self._acquired: - proto.close() - - for transport in self._cleanup_closed_transports: - if transport is not None: - transport.abort() - - finally: - self._conns.clear() - self._acquired.clear() - self._waiters.clear() - self._cleanup_handle = None - self._cleanup_closed_transports.clear() - self._cleanup_closed_handle = None - - @property - def closed(self) -> bool: - """Is connector closed. - - A readonly property. - """ - return self._closed - - def _available_connections(self, key: "ConnectionKey") -> int: - """ - Return number of available connections. - - The limit, limit_per_host and the connection key are taken into account. - - If it returns less than 1 means that there are no connections - available. 
- """ - if self._limit: - # total calc available connections - available = self._limit - len(self._acquired) - - # check limit per host - if ( - self._limit_per_host - and available > 0 - and key in self._acquired_per_host - ): - acquired = self._acquired_per_host.get(key) - assert acquired is not None - available = self._limit_per_host - len(acquired) - - elif self._limit_per_host and key in self._acquired_per_host: - # check limit per host - acquired = self._acquired_per_host.get(key) - assert acquired is not None - available = self._limit_per_host - len(acquired) - else: - available = 1 - - return available - - async def connect( - self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" - ) -> Connection: - """Get from pool or create new connection.""" - key = req.connection_key - available = self._available_connections(key) - - # Wait if there are no available connections or if there are/were - # waiters (i.e. don't steal connection from a waiter about to wake up) - if available <= 0 or key in self._waiters: - fut = self._loop.create_future() - - # This connection will now count towards the limit. - self._waiters[key].append(fut) - - if traces: - for trace in traces: - await trace.send_connection_queued_start() - - try: - await fut - except BaseException as e: - if key in self._waiters: - # remove a waiter even if it was cancelled, normally it's - # removed when it's notified - try: - self._waiters[key].remove(fut) - except ValueError: # fut may no longer be in list - pass - - raise e - finally: - if key in self._waiters and not self._waiters[key]: - del self._waiters[key] - - if traces: - for trace in traces: - await trace.send_connection_queued_end() - - proto = self._get(key) - if proto is None: - placeholder = cast(ResponseHandler, _TransportPlaceholder()) - self._acquired.add(placeholder) - self._acquired_per_host[key].add(placeholder) - - if traces: - for trace in traces: - await trace.send_connection_create_start() - - try: - proto = await self._create_connection(req, traces, timeout) - if self._closed: - proto.close() - raise ClientConnectionError("Connector is closed.") - except BaseException: - if not self._closed: - self._acquired.remove(placeholder) - self._drop_acquired_per_host(key, placeholder) - self._release_waiter() - raise - else: - if not self._closed: - self._acquired.remove(placeholder) - self._drop_acquired_per_host(key, placeholder) - - if traces: - for trace in traces: - await trace.send_connection_create_end() - else: - if traces: - # Acquire the connection to prevent race conditions with limits - placeholder = cast(ResponseHandler, _TransportPlaceholder()) - self._acquired.add(placeholder) - self._acquired_per_host[key].add(placeholder) - for trace in traces: - await trace.send_connection_reuseconn() - self._acquired.remove(placeholder) - self._drop_acquired_per_host(key, placeholder) - - self._acquired.add(proto) - self._acquired_per_host[key].add(proto) - return Connection(self, key, proto, self._loop) - - def _get(self, key: "ConnectionKey") -> Optional[ResponseHandler]: - try: - conns = self._conns[key] - except KeyError: - return None - - t1 = self._loop.time() - while conns: - proto, t0 = conns.pop() - if proto.is_connected(): - if t1 - t0 > self._keepalive_timeout: - transport = proto.transport - proto.close() - # only for SSL transports - if key.is_ssl and not self._cleanup_closed_disabled: - self._cleanup_closed_transports.append(transport) - else: - if not conns: - # The very last connection was reclaimed: drop the key - del 
self._conns[key] - return proto - else: - transport = proto.transport - proto.close() - if key.is_ssl and not self._cleanup_closed_disabled: - self._cleanup_closed_transports.append(transport) - - # No more connections: drop the key - del self._conns[key] - return None - - def _release_waiter(self) -> None: - """ - Iterates over all waiters until one to be released is found. - - The one to be released is not finsihed and - belongs to a host that has available connections. - """ - if not self._waiters: - return - - # Having the dict keys ordered this avoids to iterate - # at the same order at each call. - queues = list(self._waiters.keys()) - random.shuffle(queues) - - for key in queues: - if self._available_connections(key) < 1: - continue - - waiters = self._waiters[key] - while waiters: - waiter = waiters.popleft() - if not waiter.done(): - waiter.set_result(None) - return - - def _release_acquired(self, key: "ConnectionKey", proto: ResponseHandler) -> None: - if self._closed: - # acquired connection is already released on connector closing - return - - try: - self._acquired.remove(proto) - self._drop_acquired_per_host(key, proto) - except KeyError: # pragma: no cover - # this may be result of undetermenistic order of objects - # finalization due garbage collection. - pass - else: - self._release_waiter() - - def _release( - self, - key: "ConnectionKey", - protocol: ResponseHandler, - *, - should_close: bool = False, - ) -> None: - if self._closed: - # acquired connection is already released on connector closing - return - - self._release_acquired(key, protocol) - - if self._force_close: - should_close = True - - if should_close or protocol.should_close: - transport = protocol.transport - protocol.close() - - if key.is_ssl and not self._cleanup_closed_disabled: - self._cleanup_closed_transports.append(transport) - else: - conns = self._conns.get(key) - if conns is None: - conns = self._conns[key] = [] - conns.append((protocol, self._loop.time())) - - if self._cleanup_handle is None: - self._cleanup_handle = helpers.weakref_handle( - self, "_cleanup", self._keepalive_timeout, self._loop - ) - - async def _create_connection( - self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" - ) -> ResponseHandler: - raise NotImplementedError() - - -class _DNSCacheTable: - def __init__(self, ttl: Optional[float] = None) -> None: - self._addrs_rr: Dict[Tuple[str, int], Tuple[Iterator[Dict[str, Any]], int]] = {} - self._timestamps: Dict[Tuple[str, int], float] = {} - self._ttl = ttl - - def __contains__(self, host: object) -> bool: - return host in self._addrs_rr - - def add(self, key: Tuple[str, int], addrs: List[Dict[str, Any]]) -> None: - self._addrs_rr[key] = (cycle(addrs), len(addrs)) - - if self._ttl: - self._timestamps[key] = monotonic() - - def remove(self, key: Tuple[str, int]) -> None: - self._addrs_rr.pop(key, None) - - if self._ttl: - self._timestamps.pop(key, None) - - def clear(self) -> None: - self._addrs_rr.clear() - self._timestamps.clear() - - def next_addrs(self, key: Tuple[str, int]) -> List[Dict[str, Any]]: - loop, length = self._addrs_rr[key] - addrs = list(islice(loop, length)) - # Consume one more element to shift internal state of `cycle` - next(loop) - return addrs - - def expired(self, key: Tuple[str, int]) -> bool: - if self._ttl is None: - return False - - return self._timestamps[key] + self._ttl < monotonic() - - -class TCPConnector(BaseConnector): - """TCP connector. - - verify_ssl - Set to True to check ssl certifications. 
- fingerprint - Pass the binary sha256 - digest of the expected certificate in DER format to verify - that the certificate the server presents matches. See also - https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning - resolver - Enable DNS lookups and use this - resolver - use_dns_cache - Use memory cache for DNS lookups. - ttl_dns_cache - Max seconds having cached a DNS entry, None forever. - family - socket address family - local_addr - local tuple of (host, port) to bind socket to - - keepalive_timeout - (optional) Keep-alive timeout. - force_close - Set to True to force close and do reconnect - after each request (and between redirects). - limit - The total number of simultaneous connections. - limit_per_host - Number of simultaneous connections to one host. - enable_cleanup_closed - Enables clean-up closed ssl transports. - Disabled by default. - loop - Optional event loop. - """ - - def __init__( - self, - *, - verify_ssl: bool = True, - fingerprint: Optional[bytes] = None, - use_dns_cache: bool = True, - ttl_dns_cache: Optional[int] = 10, - family: int = 0, - ssl_context: Optional[SSLContext] = None, - ssl: Union[None, bool, Fingerprint, SSLContext] = None, - local_addr: Optional[Tuple[str, int]] = None, - resolver: Optional[AbstractResolver] = None, - keepalive_timeout: Union[None, float, object] = sentinel, - force_close: bool = False, - limit: int = 100, - limit_per_host: int = 0, - enable_cleanup_closed: bool = False, - loop: Optional[asyncio.AbstractEventLoop] = None, - ): - super().__init__( - keepalive_timeout=keepalive_timeout, - force_close=force_close, - limit=limit, - limit_per_host=limit_per_host, - enable_cleanup_closed=enable_cleanup_closed, - loop=loop, - ) - - self._ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint) - if resolver is None: - resolver = DefaultResolver(loop=self._loop) - self._resolver = resolver - - self._use_dns_cache = use_dns_cache - self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache) - self._throttle_dns_events: Dict[Tuple[str, int], EventResultOrError] = {} - self._family = family - self._local_addr = local_addr - - def close(self) -> Awaitable[None]: - """Close all ongoing DNS calls.""" - for ev in self._throttle_dns_events.values(): - ev.cancel() - - return super().close() - - @property - def family(self) -> int: - """Socket family like AF_INET.""" - return self._family - - @property - def use_dns_cache(self) -> bool: - """True if local DNS caching is enabled.""" - return self._use_dns_cache - - def clear_dns_cache( - self, host: Optional[str] = None, port: Optional[int] = None - ) -> None: - """Remove specified host/port or clear all dns local cache.""" - if host is not None and port is not None: - self._cached_hosts.remove((host, port)) - elif host is not None or port is not None: - raise ValueError("either both host and port " "or none of them are allowed") - else: - self._cached_hosts.clear() - - async def _resolve_host( - self, host: str, port: int, traces: Optional[List["Trace"]] = None - ) -> List[Dict[str, Any]]: - if is_ip_address(host): - return [ - { - "hostname": host, - "host": host, - "port": port, - "family": self._family, - "proto": 0, - "flags": 0, - } - ] - - if not self._use_dns_cache: - - if traces: - for trace in traces: - await trace.send_dns_resolvehost_start(host) - - res = await self._resolver.resolve(host, port, family=self._family) - - if traces: - for trace in traces: - await trace.send_dns_resolvehost_end(host) - - return res - - key = (host, port) - - if (key in 
self._cached_hosts) and (not self._cached_hosts.expired(key)): - # get result early, before any await (#4014) - result = self._cached_hosts.next_addrs(key) - - if traces: - for trace in traces: - await trace.send_dns_cache_hit(host) - return result - - if key in self._throttle_dns_events: - # get event early, before any await (#4014) - event = self._throttle_dns_events[key] - if traces: - for trace in traces: - await trace.send_dns_cache_hit(host) - await event.wait() - else: - # update dict early, before any await (#4014) - self._throttle_dns_events[key] = EventResultOrError(self._loop) - if traces: - for trace in traces: - await trace.send_dns_cache_miss(host) - try: - - if traces: - for trace in traces: - await trace.send_dns_resolvehost_start(host) - - addrs = await self._resolver.resolve(host, port, family=self._family) - if traces: - for trace in traces: - await trace.send_dns_resolvehost_end(host) - - self._cached_hosts.add(key, addrs) - self._throttle_dns_events[key].set() - except BaseException as e: - # any DNS exception, independently of the implementation - # is set for the waiters to raise the same exception. - self._throttle_dns_events[key].set(exc=e) - raise - finally: - self._throttle_dns_events.pop(key) - - return self._cached_hosts.next_addrs(key) - - async def _create_connection( - self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" - ) -> ResponseHandler: - """Create connection. - - Has same keyword arguments as BaseEventLoop.create_connection. - """ - if req.proxy: - _, proto = await self._create_proxy_connection(req, traces, timeout) - else: - _, proto = await self._create_direct_connection(req, traces, timeout) - - return proto - - @staticmethod - @functools.lru_cache(None) - def _make_ssl_context(verified: bool) -> SSLContext: - if verified: - return ssl.create_default_context() - else: - sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) - sslcontext.options |= ssl.OP_NO_SSLv2 - sslcontext.options |= ssl.OP_NO_SSLv3 - sslcontext.check_hostname = False - sslcontext.verify_mode = ssl.CERT_NONE - try: - sslcontext.options |= ssl.OP_NO_COMPRESSION - except AttributeError as attr_err: - warnings.warn( - "{!s}: The Python interpreter is compiled " - "against OpenSSL < 1.0.0. Ref: " - "https://docs.python.org/3/library/ssl.html" - "#ssl.OP_NO_COMPRESSION".format(attr_err), - ) - sslcontext.set_default_verify_paths() - return sslcontext - - def _get_ssl_context(self, req: "ClientRequest") -> Optional[SSLContext]: - """Logic to get the correct SSL context - - 0. if req.ssl is false, return None - - 1. if ssl_context is specified in req, use it - 2. if _ssl_context is specified in self, use it - 3. otherwise: - 1. if verify_ssl is not specified in req, use self.ssl_context - (will generate a default context according to self.verify_ssl) - 2. if verify_ssl is True in req, generate a default SSL context - 3. 
if verify_ssl is False in req, generate a SSL context that - won't verify - """ - if req.is_ssl(): - if ssl is None: # pragma: no cover - raise RuntimeError("SSL is not supported.") - sslcontext = req.ssl - if isinstance(sslcontext, ssl.SSLContext): - return sslcontext - if sslcontext is not None: - # not verified or fingerprinted - return self._make_ssl_context(False) - sslcontext = self._ssl - if isinstance(sslcontext, ssl.SSLContext): - return sslcontext - if sslcontext is not None: - # not verified or fingerprinted - return self._make_ssl_context(False) - return self._make_ssl_context(True) - else: - return None - - def _get_fingerprint(self, req: "ClientRequest") -> Optional["Fingerprint"]: - ret = req.ssl - if isinstance(ret, Fingerprint): - return ret - ret = self._ssl - if isinstance(ret, Fingerprint): - return ret - return None - - async def _wrap_create_connection( - self, - *args: Any, - req: "ClientRequest", - timeout: "ClientTimeout", - client_error: Type[Exception] = ClientConnectorError, - **kwargs: Any, - ) -> Tuple[asyncio.Transport, ResponseHandler]: - try: - async with ceil_timeout(timeout.sock_connect): - return await self._loop.create_connection(*args, **kwargs) # type: ignore[return-value] # noqa - except cert_errors as exc: - raise ClientConnectorCertificateError(req.connection_key, exc) from exc - except ssl_errors as exc: - raise ClientConnectorSSLError(req.connection_key, exc) from exc - except OSError as exc: - if exc.errno is None and isinstance(exc, asyncio.TimeoutError): - raise - raise client_error(req.connection_key, exc) from exc - - def _fail_on_no_start_tls(self, req: "ClientRequest") -> None: - """Raise a :py:exc:`RuntimeError` on missing ``start_tls()``. - - One case is that :py:meth:`asyncio.loop.start_tls` is not yet - implemented under Python 3.6. It is necessary for TLS-in-TLS so - that it is possible to send HTTPS queries through HTTPS proxies. - - This doesn't affect regular HTTP requests, though. - """ - if not req.is_ssl(): - return - - proxy_url = req.proxy - assert proxy_url is not None - if proxy_url.scheme != "https": - return - - self._check_loop_for_start_tls() - - def _check_loop_for_start_tls(self) -> None: - try: - self._loop.start_tls - except AttributeError as attr_exc: - raise RuntimeError( - "An HTTPS request is being sent through an HTTPS proxy. " - "This needs support for TLS in TLS but it is not implemented " - "in your runtime for the stdlib asyncio.\n\n" - "Please upgrade to Python 3.7 or higher. For more details, " - "please see:\n" - "* https://bugs.python.org/issue37179\n" - "* https://github.com/python/cpython/pull/28073\n" - "* https://docs.aiohttp.org/en/stable/" - "client_advanced.html#proxy-support\n" - "* https://github.com/aio-libs/aiohttp/discussions/6044\n", - ) from attr_exc - - def _loop_supports_start_tls(self) -> bool: - try: - self._check_loop_for_start_tls() - except RuntimeError: - return False - else: - return True - - def _warn_about_tls_in_tls( - self, - underlying_transport: asyncio.Transport, - req: "ClientRequest", - ) -> None: - """Issue a warning if the requested URL has HTTPS scheme.""" - if req.request_info.url.scheme != "https": - return - - asyncio_supports_tls_in_tls = getattr( - underlying_transport, - "_start_tls_compatible", - False, - ) - - if asyncio_supports_tls_in_tls: - return - - warnings.warn( - "An HTTPS request is being sent through an HTTPS proxy. " - "This support for TLS in TLS is known to be disabled " - "in the stdlib asyncio. 
This is why you'll probably see " - "an error in the log below.\n\n" - "It is possible to enable it via monkeypatching under " - "Python 3.7 or higher. For more details, see:\n" - "* https://bugs.python.org/issue37179\n" - "* https://github.com/python/cpython/pull/28073\n\n" - "You can temporarily patch this as follows:\n" - "* https://docs.aiohttp.org/en/stable/client_advanced.html#proxy-support\n" - "* https://github.com/aio-libs/aiohttp/discussions/6044\n", - RuntimeWarning, - source=self, - # Why `4`? At least 3 of the calls in the stack originate - # from the methods in this class. - stacklevel=3, - ) - - async def _start_tls_connection( - self, - underlying_transport: asyncio.Transport, - req: "ClientRequest", - timeout: "ClientTimeout", - client_error: Type[Exception] = ClientConnectorError, - ) -> Tuple[asyncio.BaseTransport, ResponseHandler]: - """Wrap the raw TCP transport with TLS.""" - tls_proto = self._factory() # Create a brand new proto for TLS - - # Safety of the `cast()` call here is based on the fact that - # internally `_get_ssl_context()` only returns `None` when - # `req.is_ssl()` evaluates to `False` which is never gonna happen - # in this code path. Of course, it's rather fragile - # maintainability-wise but this is to be solved separately. - sslcontext = cast(ssl.SSLContext, self._get_ssl_context(req)) - - try: - async with ceil_timeout(timeout.sock_connect): - try: - tls_transport = await self._loop.start_tls( - underlying_transport, - tls_proto, - sslcontext, - server_hostname=req.host, - ssl_handshake_timeout=timeout.total, - ) - except BaseException: - # We need to close the underlying transport since - # `start_tls()` probably failed before it had a - # chance to do this: - underlying_transport.close() - raise - except cert_errors as exc: - raise ClientConnectorCertificateError(req.connection_key, exc) from exc - except ssl_errors as exc: - raise ClientConnectorSSLError(req.connection_key, exc) from exc - except OSError as exc: - if exc.errno is None and isinstance(exc, asyncio.TimeoutError): - raise - raise client_error(req.connection_key, exc) from exc - except TypeError as type_err: - # Example cause looks like this: - # TypeError: transport is not supported by start_tls() - - raise ClientConnectionError( - "Cannot initialize a TLS-in-TLS connection to host " - f"{req.host!s}:{req.port:d} through an underlying connection " - f"to an HTTPS proxy {req.proxy!s} ssl:{req.ssl or 'default'} " - f"[{type_err!s}]" - ) from type_err - else: - tls_proto.connection_made( - tls_transport - ) # Kick the state machine of the new TLS protocol - - return tls_transport, tls_proto - - async def _create_direct_connection( - self, - req: "ClientRequest", - traces: List["Trace"], - timeout: "ClientTimeout", - *, - client_error: Type[Exception] = ClientConnectorError, - ) -> Tuple[asyncio.Transport, ResponseHandler]: - sslcontext = self._get_ssl_context(req) - fingerprint = self._get_fingerprint(req) - - host = req.url.raw_host - assert host is not None - port = req.port - assert port is not None - host_resolved = asyncio.ensure_future( - self._resolve_host(host, port, traces=traces), loop=self._loop - ) - try: - # Cancelling this lookup should not cancel the underlying lookup - # or else the cancel event will get broadcast to all the waiters - # across all connections. 
- hosts = await asyncio.shield(host_resolved) - except asyncio.CancelledError: - - def drop_exception(fut: "asyncio.Future[List[Dict[str, Any]]]") -> None: - with suppress(Exception, asyncio.CancelledError): - fut.result() - - host_resolved.add_done_callback(drop_exception) - raise - except OSError as exc: - if exc.errno is None and isinstance(exc, asyncio.TimeoutError): - raise - # in case of proxy it is not ClientProxyConnectionError - # it is problem of resolving proxy ip itself - raise ClientConnectorError(req.connection_key, exc) from exc - - last_exc: Optional[Exception] = None - - for hinfo in hosts: - host = hinfo["host"] - port = hinfo["port"] - - try: - transp, proto = await self._wrap_create_connection( - self._factory, - host, - port, - timeout=timeout, - ssl=sslcontext, - family=hinfo["family"], - proto=hinfo["proto"], - flags=hinfo["flags"], - server_hostname=hinfo["hostname"] if sslcontext else None, - local_addr=self._local_addr, - req=req, - client_error=client_error, - ) - except ClientConnectorError as exc: - last_exc = exc - continue - - if req.is_ssl() and fingerprint: - try: - fingerprint.check(transp) - except ServerFingerprintMismatch as exc: - transp.close() - if not self._cleanup_closed_disabled: - self._cleanup_closed_transports.append(transp) - last_exc = exc - continue - - return transp, proto - else: - assert last_exc is not None - raise last_exc - - async def _create_proxy_connection( - self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" - ) -> Tuple[asyncio.BaseTransport, ResponseHandler]: - self._fail_on_no_start_tls(req) - runtime_has_start_tls = self._loop_supports_start_tls() - - headers: Dict[str, str] = {} - if req.proxy_headers is not None: - headers = req.proxy_headers # type: ignore[assignment] - headers[hdrs.HOST] = req.headers[hdrs.HOST] - - url = req.proxy - assert url is not None - proxy_req = ClientRequest( - hdrs.METH_GET, - url, - headers=headers, - auth=req.proxy_auth, - loop=self._loop, - ssl=req.ssl, - ) - - # create connection to proxy server - transport, proto = await self._create_direct_connection( - proxy_req, [], timeout, client_error=ClientProxyConnectionError - ) - - # Many HTTP proxies has buggy keepalive support. Let's not - # reuse connection but close it after processing every - # response. - proto.force_close() - - auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None) - if auth is not None: - if not req.is_ssl(): - req.headers[hdrs.PROXY_AUTHORIZATION] = auth - else: - proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth - - if req.is_ssl(): - if runtime_has_start_tls: - self._warn_about_tls_in_tls(transport, req) - - # For HTTPS requests over HTTP proxy - # we must notify proxy to tunnel connection - # so we send CONNECT command: - # CONNECT www.python.org:443 HTTP/1.1 - # Host: www.python.org - # - # next we must do TLS handshake and so on - # to do this we must wrap raw socket into secure one - # asyncio handles this perfectly - proxy_req.method = hdrs.METH_CONNECT - proxy_req.url = req.url - key = attr.evolve( - req.connection_key, proxy=None, proxy_auth=None, proxy_headers_hash=None - ) - conn = Connection(self, key, proto, self._loop) - proxy_resp = await proxy_req.send(conn) - try: - protocol = conn._protocol - assert protocol is not None - - # read_until_eof=True will ensure the connection isn't closed - # once the response is received and processed allowing - # START_TLS to work on the connection below. 
- protocol.set_response_params(read_until_eof=runtime_has_start_tls) - resp = await proxy_resp.start(conn) - except BaseException: - proxy_resp.close() - conn.close() - raise - else: - conn._protocol = None - conn._transport = None - try: - if resp.status != 200: - message = resp.reason - if message is None: - message = RESPONSES[resp.status][0] - raise ClientHttpProxyError( - proxy_resp.request_info, - resp.history, - status=resp.status, - message=message, - headers=resp.headers, - ) - if not runtime_has_start_tls: - rawsock = transport.get_extra_info("socket", default=None) - if rawsock is None: - raise RuntimeError( - "Transport does not expose socket instance" - ) - # Duplicate the socket, so now we can close proxy transport - rawsock = rawsock.dup() - except BaseException: - # It shouldn't be closed in `finally` because it's fed to - # `loop.start_tls()` and the docs say not to touch it after - # passing there. - transport.close() - raise - finally: - if not runtime_has_start_tls: - transport.close() - - if not runtime_has_start_tls: - # HTTP proxy with support for upgrade to HTTPS - sslcontext = self._get_ssl_context(req) - return await self._wrap_create_connection( - self._factory, - timeout=timeout, - ssl=sslcontext, - sock=rawsock, - server_hostname=req.host, - req=req, - ) - - return await self._start_tls_connection( - # Access the old transport for the last time before it's - # closed and forgotten forever: - transport, - req=req, - timeout=timeout, - ) - finally: - proxy_resp.close() - - return transport, proto - - -class UnixConnector(BaseConnector): - """Unix socket connector. - - path - Unix socket path. - keepalive_timeout - (optional) Keep-alive timeout. - force_close - Set to True to force close and do reconnect - after each request (and between redirects). - limit - The total number of simultaneous connections. - limit_per_host - Number of simultaneous connections to one host. - loop - Optional event loop. - """ - - def __init__( - self, - path: str, - force_close: bool = False, - keepalive_timeout: Union[object, float, None] = sentinel, - limit: int = 100, - limit_per_host: int = 0, - loop: Optional[asyncio.AbstractEventLoop] = None, - ) -> None: - super().__init__( - force_close=force_close, - keepalive_timeout=keepalive_timeout, - limit=limit, - limit_per_host=limit_per_host, - loop=loop, - ) - self._path = path - - @property - def path(self) -> str: - """Path to unix socket.""" - return self._path - - async def _create_connection( - self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" - ) -> ResponseHandler: - try: - async with ceil_timeout(timeout.sock_connect): - _, proto = await self._loop.create_unix_connection( - self._factory, self._path - ) - except OSError as exc: - if exc.errno is None and isinstance(exc, asyncio.TimeoutError): - raise - raise UnixClientConnectorError(self.path, req.connection_key, exc) from exc - - return cast(ResponseHandler, proto) - - -class NamedPipeConnector(BaseConnector): - """Named pipe connector. - - Only supported by the proactor event loop. - See also: https://docs.python.org/3.7/library/asyncio-eventloop.html - - path - Windows named pipe path. - keepalive_timeout - (optional) Keep-alive timeout. - force_close - Set to True to force close and do reconnect - after each request (and between redirects). - limit - The total number of simultaneous connections. - limit_per_host - Number of simultaneous connections to one host. - loop - Optional event loop. 
- """ - - def __init__( - self, - path: str, - force_close: bool = False, - keepalive_timeout: Union[object, float, None] = sentinel, - limit: int = 100, - limit_per_host: int = 0, - loop: Optional[asyncio.AbstractEventLoop] = None, - ) -> None: - super().__init__( - force_close=force_close, - keepalive_timeout=keepalive_timeout, - limit=limit, - limit_per_host=limit_per_host, - loop=loop, - ) - if not isinstance( - self._loop, asyncio.ProactorEventLoop # type: ignore[attr-defined] - ): - raise RuntimeError( - "Named Pipes only available in proactor " "loop under windows" - ) - self._path = path - - @property - def path(self) -> str: - """Path to the named pipe.""" - return self._path - - async def _create_connection( - self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" - ) -> ResponseHandler: - try: - async with ceil_timeout(timeout.sock_connect): - _, proto = await self._loop.create_pipe_connection( # type: ignore[attr-defined] # noqa: E501 - self._factory, self._path - ) - # the drain is required so that the connection_made is called - # and transport is set otherwise it is not set before the - # `assert conn.transport is not None` - # in client.py's _request method - await asyncio.sleep(0) - # other option is to manually set transport like - # `proto.transport = trans` - except OSError as exc: - if exc.errno is None and isinstance(exc, asyncio.TimeoutError): - raise - raise ClientConnectorError(req.connection_key, exc) from exc - - return cast(ResponseHandler, proto) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/custom.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/custom.py deleted file mode 100644 index b7eee4ad7db99c447732e3f3ebf2e8c108fe93a8..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/custom.py +++ /dev/null @@ -1,40 +0,0 @@ -from sqlalchemy.sql.ddl import DDL -from sqlalchemy.exc import ArgumentError - -from clickhouse_connect.driver.query import quote_identifier - - -# pylint: disable=too-many-ancestors,abstract-method -class CreateDatabase(DDL): - """ - SqlAlchemy DDL statement that is essentially an alternative to the built in CreateSchema DDL class - """ - # pylint: disable-msg=too-many-arguments - def __init__(self, name: str, engine: str = None, zoo_path: str = None, shard_name: str = '{shard}', - replica_name: str = '{replica}'): - """ - :param name: Database name - :param engine: Database ClickHouse engine type - :param zoo_path: ClickHouse zookeeper path for Replicated database engine - :param shard_name: Clickhouse shard name for Replicated database engine - :param replica_name: Replica name for Replicated database engine - """ - if engine and engine not in ('Ordinary', 'Atomic', 'Lazy', 'Replicated'): - raise ArgumentError(f'Unrecognized engine type {engine}') - stmt = f'CREATE DATABASE {quote_identifier(name)}' - if engine: - stmt += f' Engine {engine}' - if engine == 'Replicated': - if not zoo_path: - raise ArgumentError('zoo_path is required for Replicated Database Engine') - stmt += f" ('{zoo_path}', '{shard_name}', '{replica_name}'" - super().__init__(stmt) - - -# pylint: disable=too-many-ancestors,abstract-method -class DropDatabase(DDL): - """ - Alternative DDL statement for built in SqlAlchemy DropSchema DDL class - """ - def __init__(self, name: str): - super().__init__(f'DROP 
DATABASE {quote_identifier(name)}') diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/components/checkbox.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/components/checkbox.py deleted file mode 100644 index c8681b735e5a009dd9d133abc31b1042c9756dbd..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/components/checkbox.py +++ /dev/null @@ -1,134 +0,0 @@ -"""gr.Checkbox() component.""" - -from __future__ import annotations - -from typing import Callable, Literal - -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import BooleanSerializable - -from gradio.components.base import FormComponent, IOComponent, _Keywords -from gradio.events import Changeable, EventListenerMethod, Inputable, Selectable -from gradio.interpretation import NeighborInterpretable - -set_documentation_group("component") - - -@document() -class Checkbox( - FormComponent, - Changeable, - Inputable, - Selectable, - IOComponent, - BooleanSerializable, - NeighborInterpretable, -): - """ - Creates a checkbox that can be set to `True` or `False`. - - Preprocessing: passes the status of the checkbox as a {bool} into the function. - Postprocessing: expects a {bool} returned from the function and, if it is True, checks the checkbox. - Examples-format: a {bool} representing whether the box is checked. - Demos: sentence_builder, titanic_survival - """ - - def __init__( - self, - value: bool | Callable = False, - *, - label: str | None = None, - info: str | None = None, - every: float | None = None, - show_label: bool = True, - container: bool = True, - scale: int | None = None, - min_width: int = 160, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - value: if True, checked by default. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - info: additional component description. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - container: If True, will place the component in a container - providing some extra padding around the border. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - interactive: if True, this checkbox can be checked; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - self.select: EventListenerMethod - """ - Event listener for when the user selects or deselects Checkbox. - Uses event data gradio.SelectData to carry `value` referring to label of checkbox, and `selected` to refer to state of checkbox. - See EventData documentation on how to use this event data. - """ - IOComponent.__init__( - self, - label=label, - info=info, - every=every, - show_label=show_label, - container=container, - scale=scale, - min_width=min_width, - interactive=interactive, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - **kwargs, - ) - NeighborInterpretable.__init__(self) - - def get_config(self): - return { - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: bool | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - info: str | None = None, - show_label: bool | None = None, - container: bool | None = None, - scale: int | None = None, - min_width: int | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - return { - "label": label, - "info": info, - "show_label": show_label, - "container": container, - "scale": scale, - "min_width": min_width, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - - def get_interpretation_neighbors(self, x): - return [not x], {} - - def get_interpretation_scores(self, x, neighbors, scores, **kwargs): - """ - Returns: - The first value represents the interpretation score if the input is False, and the second if the input is True. - """ - if x: - return scores[0], None - else: - return None, scores[0] diff --git a/spaces/cihyFjudo/fairness-paper-search/CRACK Internet Download Manager (IDM) 6.30 Build 10 Crack A Step-by-Step Tutorial on How to Install and Use IDM.md b/spaces/cihyFjudo/fairness-paper-search/CRACK Internet Download Manager (IDM) 6.30 Build 10 Crack A Step-by-Step Tutorial on How to Install and Use IDM.md deleted file mode 100644 index 30736c6c030c56994a39e5a81e35c5176c1f7416..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/CRACK Internet Download Manager (IDM) 6.30 Build 10 Crack A Step-by-Step Tutorial on How to Install and Use IDM.md +++ /dev/null @@ -1,6 +0,0 @@ -

CRACK Internet Download Manager (IDM) 6.30 Build 10 Crack


Download Zip ––– https://tinurli.com/2uwhNd



- - aaccfb2cb3
-
-
-

diff --git a/spaces/cihyFjudo/fairness-paper-search/Download Paint Shop Pro 6 Keygen and Unlock All the Features of the Photo Editing Program.md b/spaces/cihyFjudo/fairness-paper-search/Download Paint Shop Pro 6 Keygen and Unlock All the Features of the Photo Editing Program.md deleted file mode 100644 index e0d3bd765c10b15088412559bdc0de3d0a8cba08..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Download Paint Shop Pro 6 Keygen and Unlock All the Features of the Photo Editing Program.md +++ /dev/null @@ -1,26 +0,0 @@ -
-

Awful program. Interface is kludgy and poorly designed, not as bad as Adobe, but trying to imitate that app destroyed what was Paint Shop's best feature -- ease of use. One of the most buggy apps I've ever used. Almost as bad as Pinnacle, in that class in terms of bad code. Won't load, takes forever to load, freezes, won't let you save work. A total mess. I thought X2, X3, X4 were bad. This is the worst. I've quit Corel and moved to Photoshop, which I detest. It's that bad.

-

Updaqte as at 29th October re Corel Paint Shop Pro X5 and AfterShot Pro. Both products still exhibit drastic vignetting / distortion when downloading wide angle RAW files taken with Canon G1X. Finally Corel have closed this file , as they said "...due to no activity for the last 10 days... Interesting !! there was no communication from Corel during tha t time that any action from their end was contemplated. I have tested both Lightroom 4 and Photoshop Elements 11 - these two Adobe products appear to handle these files correctly. I'd be interested to hear of anyone else who've experienced similar problems with these Corel products.
p.s. A letter (not email) was forwarded to the company's head office some time ago - at the time of posting to this forum (surprise, surprise) I have received no response.

-

paint shop pro 6 keygen


Download File >>>>> https://tinurli.com/2uwiUr



-

Paintshop Pro X4 is just buggy! First it takes about 20 seconds to open up, when it does open up correctly, then occasionally, it will not completely load and crash.
Judging from other users here, why should I take a chance on X5?
Since I purchased X4, I have not seen one update to correct the bugs, and I have not read that X5 corrected them.
I may buy it second-hand on eBay when people start selling it, if it's cheap enough.
P.S. I run WinXP on a virus-free quad-core desktop with 4 GB of memory and plenty of room on my hard drive!

-

Worse, the program actually makes you think that you can save a 16-bit TIF, but if I then take the saved TIF and look at the histogram, I find that there are only 256 distinct levels of RGB information, not 65,536 (using the "wide histogram" plugin in Photoshop, or the freeware application "Histogrammar").

-

Apologies if we are not talking about the same thing but if you go into PSP's "Color Management" setup under "File" you can work in any color space you have installed on your PC, including Adobe RGB and Pro. You can choose converting your image to that space or assigning it etc. I usually work in aRGB with PSP or even in the uniquely defined color space of my wide gamut monitor and no problems at all. My only complaint is that color management is not implemented as conveniently as in Photoshop. For example, to change color space you must close all open images ... a nuisance when you must do it but I seldom change my color space preference anyway.

-

For Adobe it's simple - Apple invested in Adobe and Photoshop started on Mac. Then, once you have product core working it's relatively easy to add new features to both Windows and Mac versions.
For Corel, Mac support would be like developing a new product from scratch.

-

There are apparently no solid figures available for Macs in use worldwide -- Apple probably knows but isn't talking -- but estimates exist that 80-million Macs (and perhaps up to 100-million when you include older, pre-Intel chip versions) are currently in use. Not too shabby. Corel makes a pretty fine product, and yes, it would take a lot of R&D to gear up a Mac version of PaintShop, but that could mean MILLIONS of copies sold to the Mac creative community on a continuing basis, who are sick of paying $600-$900 for the bloated and overpriced Adobe Photoshops. If you were the CEO of Corel, why wouldn't you start down that road, knowing that the market is just sitting there waiting? Come on Corel, it's the 21st Century --get over the anti-Mac 1980's huff and climb on board!

-

-

The one-time purchase options are a good fit for those who still resent Adobe's move to a subscription-only model for Photoshop, Lightroom, and Illustrator. For $9.99 per month, you get both Photoshop and Lightroom, but Illustrator starts at $19.99 per month, if you prepay for a year. Photoshop Elements ($99), Adobe's consumer-level photo editing software, requires no subscription, but that software has more of a hobbyist feel, as opposed to the company's pro-level offerings.

-

The Ultimate version adds a Highlight Reel video slideshow-creating feature (similar to the one in Corel VideoStudio), MultiCam Capture Lite for screen and webcam video presentations, Painter Essentials 8 for simple drawing, sketching, and painting on the PC.

-

You can also save in Adobe PSD format (though you lose vector layers and other features), along with dozens of other standard image formats. If you open a PSD file created in Photoshop, layers are preserved, and you can edit them separately to taste. Afterwards, your edits are fully editable if you open the resulting PSD in Photoshop. What this means is that if you're working with someone who uses Photoshop, you'll be able to edit compatibly in PaintShop, but if you start in PaintShop, they'll only see a flattened version of your file.

-

Another gap is the lack of control over the effects. Sometimes you want to tone it down a bit, as I found with the Instant Film effect. Photoshop Elements' instant effects are indeed adjustable, but PaintShop's aren't.

-

The most commonly used photo editing tool by far is the crop tool. It may seem that there's nothing to it, but Adobe supercharged Photoshop's crop tool, even adding AI-powered auto-suggested cropping (now also found in Photoshop Elements). Corel continues to give attention to its own crop tool, too. It gives you a better idea of your final result by darkening the rest of the image. It offers overlays for composition guides, including golden spiral, golden ratio, and rule of thirds. When you rotate with the tool, the crop box stays put while the image rotates, so you can see the result without tilting your head.

-

AI Background Replacement. Replacing a photo's background used to be a many-step, hit-or-miss process in Photoshop. That program, and now PaintShop have both flipped the script on that scenario, making it a one-click affair. The AI Background Replacment tool in PaintShop works with human subjects, while Photoshop and Skylum Luminar now have tools for changing background skies in landscapes, too. The latter is still missing in PaintShop.

-

AI Background replacement is not unlike using Photoshop's Subject Select tool, which instantly isolates and masks a human (or even nonhuman) subject in your photo and lets you put whatever you want in the background layer. PaintShop does simplify the process, however, offering preset backgrounds.

-

AI Portrait Mode. I was expecting AI face manipulation tools like those in ON1 and Photoshop, but this tool is really just for selecting a subject and adding background blur. It works much like the iPhone's Portrait mode. The quality of the result depends on the accuracy of the selection. The selection wasn't perfect for my test shot, but luckily you can tweak it. Since the effect is simulating lens bokeh, it's interesting that you can choose between round and hexagonal apertures. I found that using the latter with less feathering worked best.

-

AI Upsampling. We've all had to deal with an image that was just too small or low-resolution for the purpose at hand. This tool does a remarkable job of removing that blocky effect when you enlarge such photos. The left side in the image above shows those blocky artifacts, while the right side uses Corel's AI Upsampling tool to produce a pleasing, smooth result. The tool offers denoising at the same time, but I was able to get this result without using any. Photoshop offers several sampling options for enlargement, but when I used them on the same image, none of them produced a result as good as this. They all still showed blockiness and artifact distortion.

-

AI Style Transfer. This is an effect that an earlier version of PaintShop called Pic-to-Painting. It's only available in the minimalist Photography workspace along with other effects in an Instant Effects panel. These effects resemble the Prisma-app craze of a few years ago, and have appeared in many photo apps, notably the competing CyberLink PhotoDirector. They use AI technology to generate art from your photos resembling that of specific painters, such a Picasso or Van Gogh.

-

Corel includes a good selection of painterly and artistic effects by default, while CyberLink requires extra downloading and charges extra for some of the effects. You can use a slider to adjust the strength of the effect, for a degree of customization. The Photography interface lets you use the split before-and-after view, seen above.

-

Once you move into Edit mode, the full assortment of tools comes into play. Just as in Photoshop, you can add layers, manipulate grouped objects, and adjust curves and levels. Layers are much better done than in ON1 Photo Raw, with a more Photoshop-like, clear view of each layer in an optional panel. You can create Vector, Raster, Art Media, Mask, and Adjustment layer types, with all the blending modes you'd expect.

-

Two selection tools, Smart Selection and Auto Selection, are similar to Photoshop's magic wand. The first did a decent job of letting me brush to create an edge-detected selection. But the Auto Selection is more impressive. You draw a box, and the tool selects an object inside it. In my testing, this only worked with very uniform backgrounds (a clear sky, for example) and objects with well-defined edges. Still, it's a useful tool for plucking a head off and using it against a different background. In the right circumstances, it works quite well.

-

Content-aware object removal and moving is a recent addition. This lets you improve composition by moving or removing an object within a photo, often a human, while maintaining the background. For removal, you have to select some background to replace the object with, so it's not as automatic as the equivalent tool in Adobe Photoshop Elements. The clone stamp tool shows a preview where you're about to apply it, and like all the tools and brushes, the size slider is based on your image size, which helps prevent you from getting a tiny brush when you need to make big changes, for example.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/How to Use Easy Suite Application Download Software to Boost Your Productivity and Efficiency.md b/spaces/cihyFjudo/fairness-paper-search/How to Use Easy Suite Application Download Software to Boost Your Productivity and Efficiency.md deleted file mode 100644 index 4e9efb06120a45a7863acb457d2d4563f485b427..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/How to Use Easy Suite Application Download Software to Boost Your Productivity and Efficiency.md +++ /dev/null @@ -1,29 +0,0 @@ - -

Thousands of organizations around the world rely on Oracle E-Business Suite to run their key business operations. Oracle continues to invest in the suite with a focus on functional advances, mobility, UI modernization, and operational efficiency. At the same time, Oracle is providing a steady stream of cloud infrastructure innovations and complementary SaaS applications to provide you with enhanced capabilities and practical coexistence strategies to optimize your Oracle E-Business Suite environments.

-

Easy Suite Application Download Software


Download File >>>>> https://tinurli.com/2uwj4y



-

DCAA does not provide technical support on this product outside of DCAA audit activities. This product has been developed and tested by DCAA for use in its audit processes. It is freeware and its use and copying is unrestricted. While DCAA has made every effort to identify and repair all deficiencies in this product, DCAA does not guarantee that this product is free from defects that might interfere with its operation on the downloader's system. This product is made available "as is," and the use of the software is done at the downloader's own risk. DCAA makes no warranties, expressed or implied, concerning this product.

-

All drivers available for download have been scanned by antivirus program.Aug 12, 2015 Easysuite Download Windows 10. Easysuite download windows 10 Free easysuite download windows 10 download software at UpdateStar -Download Easysuite Software Download - best software for Windows.Star EASY Suite for.Net:.Now I want to use it for Windows 7. This page contains information about installing the latest Insignia driver downloads using the Insignia Driver Update Tool. Insignia drivers are tiny programs that enable your Insignia hardware to communicate with your operating system software. Smartphone Apps. Insignia Library System comes with Smartphone Apps for Apple and Android devices, the App provides the patron with search and reserve capability.

-

Enjoy registry repair made easy with Registry Easy. Your privacy is better protected when you can remove your history records and wipe them clean. Registry Easy Installer downloads and installs latest version of Registry Easy.

-

Easy iPod Downloads - Download Music and Movies Straight to Your Ipod ! We give access to the most widely used ipod movie downloading software on the planet. More than 12 Million files available for download, with No 'per download' fees ! 100% Fun !

-

Do you want to download snippets creator tool? Download Rich Snippet Generator application from company website www.ibusinessutilities.com to generate search engine friendly microdata code for your website.

-

Typical tasks completed with DC Suite include taking cycle counts, tracing lot codes, tracking assets, or checking tools in or out. Scan barcodes and key in relevant data into a DC Suite application, then upload the data into spreadsheets, accounting software, or ERP systems for immediate access.

-

DC Suite software includes seven pre-written applications to accommodate the most common barcode data collection tasks, making it ready to go to work right out of the box. It is preloaded on the LDX10, TDX20, M7225b, and the Striker mobile computer. Optionally, it can be installed on the Scepter enterprise mobile computer as well.

-

However, the file transfer functions, which are my maininterest with this product, work just fine. As with any USB device, youneed to quit any open applications (in this case the EasyMacCopysoftware on both computers) and drag the volume icon to the Eject iconin the Dock or use the Eject command before disconnecting thecable.

-

-

With the pylon APIs, the target platform of the developed application does not play a role anymore. It is very easy to switch from a Windows environment to a Linux ARM environment without major code changes. This makes pylon perfectly suitable for easy development of embedded systems.

-

Test Universe is OMICRON's comprehensive and easy-to-use testing software for the CMC device family. It allows you to combine a wealth of application-optimized test modules for creating flexible and fully automated test plans that provide you with an enormous range of functions.

-

DigitalMicrograph® is the industry standard software for (scanning) transmission electron microscope ((S)TEM) experimental control and analysis, which people also know as Gatan Microscopy Suite® (GMS). DigitalMicrograph 3.5 is completely revamped and uses a new, much-simplified user interface. DigitalMicrograph 3.5 enables novice users to easily perform basic research applications, while it continues to provide the deep access and control highly experienced microscopists are used to and demand.

-

Within the EPOS Gaming Suite, all our digital audio device hardware is easy to configure. Set up Smart Button behaviors on a soundcard or select the sleep functionality for a wireless headset; receive firmware update notifications and download and install right there from within the Suite.

-

Download VideoPad Video Editor for Windows

  • Supports all popular video formats seamlessly: edit video from a DV camcorder, VHS, GoPro, webcam, or all common video file formats including avi, wmv, divx, mp4, apng, mpeg and many more.
  • Breathtaking transitions and visual effects: over 50 visual and transition effects to add a professional touch to your movies.
  • Drag-and-drop video clips for easy video editing.
  • Get it free: a free video editor version is available for non-commercial use only. If you will be using it at home, you can download the free version here. No signup required.

-

As an early adopter of SaaS technology, Solware was quick to realise the potential of hosting cloud-based automotive and healthcare web applications. Solware Auto has over 20,000 users of WINmotor within French, European, and North African car dealerships, and Solware Life has over 100,000 users of easysuite within the medical profession. On average, over 6,000 connected users access a Solware platform every day.

-

Improved insight also helped identify a long-standing issue between the hosting platform and the software code so that this could be addressed. Overall, New Relic has led to better application quality, which in turn results in higher customer satisfaction.

-

Designate up to four unique NDI sources as video inputs for popular software applications that support a webcam. With NDI Webcam Input, NDI sources are recognized as standard Microsoft® Windows® video and audio sources, making it possible to elevate your video communications without increasing the complexity of your setup.

-

Import your media files captured and recorded from NDI sources into Adobe Creative Cloud software applications from your local drives or across your network using standard storage systems. Once the NDI Import I/O for Adobe Creative Cloud is installed, all Creative Cloud applications that use video will recognize the NDI files as another media option. Simply apply media to your timelines for editing and animation projects. Because NDI files are time-stamped during recording, complex multi-cam editing is an effortless exercise.

-

Diver-Office is an important part of your water level monitoring workflow and supports all Diver equipment. Setup your monitoring projects and identify your fieldwork tasks; such as a site visit sequence, collection of Diver data and deployment of Divers. Use Diver-Field to carry out the fieldwork and download Diver data. Once the data collection is complete, use Diver-Office to merge time series, barometrically compensate the Diver data to convert them into water levels. Visualize data including manual measurements and export data to MON, CSV or XLSX files for further use in third-party applications.

-

Microsoft Office 365 is the latest office software suite from Microsoft. The software suite is now available to all enrolled students, staff, and faculty through the Microsoft campus agreement that IUP participates in. Your Office 365 subscription also includes several Office 365 applications.

-

Comprehensive anti-malware software with built-in real-time protection. IUP highly recommends students download and install this software on their personal machines. Malwarebytes is only available for Windows machines and is available at a discounted rate for students.

-

Using VMWare, the Virtual Computer Lab provides remote access to a Windows desktop so you can access the software applications found on the PCs in the public computer labs as well as the standard desktop. This service is provided for all registered students, faculty, and staff members.

-

Adobe Creative Suite CC is a powerful set of software tools. The Creative Cloud can assist in areas such as web design, image editing, video editing, graphic and art design, desktop publishing, game development, animation, and much more. These applications are available to all IUP administration, staff, and faculty for installation on a personally owned computer. Adobe Creative Cloud is installed on IUP-owned office computers.

-

What's the catch? There is none. Simply download it free of charge and use it for as long as you want. This great free Office suite has impressed millions of people who now use it every day.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/New! Tellwatch 2007 V9l The Swiss Software that Revolutionizes Watchmaking.md b/spaces/cihyFjudo/fairness-paper-search/New! Tellwatch 2007 V9l The Swiss Software that Revolutionizes Watchmaking.md deleted file mode 100644 index a5ad3f1bcb3a0e58084d21c03a4b4d7f3b5a9125..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/New! Tellwatch 2007 V9l The Swiss Software that Revolutionizes Watchmaking.md +++ /dev/null @@ -1,6 +0,0 @@ -

New! Tellwatch 2007 V9l


DOWNLOAD ::: https://tinurli.com/2uwiLV



- - aaccfb2cb3
-
-
-

diff --git a/spaces/cihyFjudo/fairness-paper-search/Sketchup Pro 2013 Serial Number And Key How to Find and Use Them.md b/spaces/cihyFjudo/fairness-paper-search/Sketchup Pro 2013 Serial Number And Key How to Find and Use Them.md deleted file mode 100644 index fb3a5cc81f025dc7c98ff967ef10003746d05c20..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Sketchup Pro 2013 Serial Number And Key How to Find and Use Them.md +++ /dev/null @@ -1,7 +0,0 @@ - -


-

Sketchup Pro 2013 Serial Number And Key


Download Ziphttps://tinurli.com/2uwjna



-

Hi I have been using sketchup off and on for about 5 years,
I am concerned that when I upgrade from 8.1 to Windows 10 I will not be able to use Sketchup Pro 2013 any more, or that it will not work seamlessly. I do not want to have to pay loads of money for an upgrade to 16, and I am not a techie, but I can find my way around a PC just about! Can you allay my concerns?

-

If you still have the e-mail with the serial number and license key, you should be able to reinstall SketchUp 2013 if needed. You should read this* before you do. It may impact whether or not ou want to use SU2013 going ahead.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Why Should Homework Be Banned Facts The Simpsons and Minijuegos Reveal the Truth about Homework.md b/spaces/cihyFjudo/fairness-paper-search/Why Should Homework Be Banned Facts The Simpsons and Minijuegos Reveal the Truth about Homework.md deleted file mode 100644 index adf08ad54e9ff1fa538c39c4463cd16fb4615d62..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Why Should Homework Be Banned Facts The Simpsons and Minijuegos Reveal the Truth about Homework.md +++ /dev/null @@ -1,6 +0,0 @@ -

Why Should Homework Be Banned Facts simpsons minijuegos


Download File 🗸🗸🗸 https://tinurli.com/2uwk2G



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/codeparrot/codeparrot-subspace/README.md b/spaces/codeparrot/codeparrot-subspace/README.md deleted file mode 100644 index 91494f37819cec1a77b9a39694d6be743e2238cc..0000000000000000000000000000000000000000 --- a/spaces/codeparrot/codeparrot-subspace/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Codeparrot Subspace -emoji: 🦜 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.0.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bonk.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bonk.c deleted file mode 100644 index 4a0027039298fcf84eeb7f4c2db2fa225295b3d5..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bonk.c +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Bonk audio decoder - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/internal.h" -#include "libavutil/intreadwrite.h" -#include "avcodec.h" -#include "codec_internal.h" -#include "decode.h" -#define BITSTREAM_READER_LE -#include "get_bits.h" -#include "bytestream.h" - -typedef struct BitCount { - uint8_t bit; - unsigned count; -} BitCount; - -typedef struct BonkContext { - GetBitContext gb; - int skip; - - uint8_t *bitstream; - int64_t max_framesize; - int bitstream_size; - int bitstream_index; - - uint64_t nb_samples; - int lossless; - int mid_side; - int n_taps; - int down_sampling; - int samples_per_packet; - - int state[2][2048], k[2048]; - int *samples[2]; - int *input_samples; - uint8_t quant[2048]; - BitCount *bits; -} BonkContext; - -static av_cold int bonk_close(AVCodecContext *avctx) -{ - BonkContext *s = avctx->priv_data; - - av_freep(&s->bitstream); - av_freep(&s->input_samples); - av_freep(&s->samples[0]); - av_freep(&s->samples[1]); - av_freep(&s->bits); - s->bitstream_size = 0; - - return 0; -} - -static av_cold int bonk_init(AVCodecContext *avctx) -{ - BonkContext *s = avctx->priv_data; - - avctx->sample_fmt = AV_SAMPLE_FMT_S16P; - if (avctx->extradata_size < 17) - return AVERROR(EINVAL); - - if (avctx->extradata[0]) { - av_log(avctx, AV_LOG_ERROR, "Unsupported version.\n"); - return AVERROR_INVALIDDATA; - } - - if (avctx->ch_layout.nb_channels < 1 || avctx->ch_layout.nb_channels > 2) - return AVERROR_INVALIDDATA; - - s->nb_samples = AV_RL32(avctx->extradata + 1) / avctx->ch_layout.nb_channels; - if (!s->nb_samples) - s->nb_samples = UINT64_MAX; - s->lossless = avctx->extradata[10] != 0; - s->mid_side = avctx->extradata[11] != 0; - s->n_taps = AV_RL16(avctx->extradata + 12); - if (!s->n_taps || s->n_taps > 2048) - return AVERROR(EINVAL); - - s->down_sampling = avctx->extradata[14]; - if (!s->down_sampling) - return AVERROR(EINVAL); - - 
s->samples_per_packet = AV_RL16(avctx->extradata + 15); - if (!s->samples_per_packet) - return AVERROR(EINVAL); - - if (s->down_sampling * s->samples_per_packet < s->n_taps) - return AVERROR_INVALIDDATA; - - s->max_framesize = s->samples_per_packet * avctx->ch_layout.nb_channels * s->down_sampling * 16LL; - if (s->max_framesize > (INT32_MAX - AV_INPUT_BUFFER_PADDING_SIZE) / 8) - return AVERROR_INVALIDDATA; - - s->bitstream = av_calloc(s->max_framesize + AV_INPUT_BUFFER_PADDING_SIZE, sizeof(*s->bitstream)); - if (!s->bitstream) - return AVERROR(ENOMEM); - - s->input_samples = av_calloc(s->samples_per_packet, sizeof(*s->input_samples)); - if (!s->input_samples) - return AVERROR(ENOMEM); - - s->samples[0] = av_calloc(s->samples_per_packet * s->down_sampling, sizeof(*s->samples[0])); - s->samples[1] = av_calloc(s->samples_per_packet * s->down_sampling, sizeof(*s->samples[0])); - if (!s->samples[0] || !s->samples[1]) - return AVERROR(ENOMEM); - - s->bits = av_calloc(s->max_framesize * 8, sizeof(*s->bits)); - if (!s->bits) - return AVERROR(ENOMEM); - - for (int i = 0; i < 512; i++) { - s->quant[i] = sqrt(i + 1); - } - - return 0; -} - -static unsigned read_uint_max(BonkContext *s, uint32_t max) -{ - unsigned value = 0; - - if (max == 0) - return 0; - - av_assert0(max >> 31 == 0); - - for (unsigned i = 1; i <= max - value; i+=i) - if (get_bits1(&s->gb)) - value += i; - - return value; -} - -static int intlist_read(BonkContext *s, int *buf, int entries, int base_2_part) -{ - int i, low_bits = 0, x = 0, max_x; - int n_zeros = 0, step = 256, dominant = 0; - int pos = 0, level = 0; - BitCount *bits = s->bits; - int passes = 1; - - memset(buf, 0, entries * sizeof(*buf)); - if (base_2_part) { - low_bits = get_bits(&s->gb, 4); - - if (low_bits) - for (i = 0; i < entries; i++) - buf[i] = get_bits(&s->gb, low_bits); - } - - while (n_zeros < entries) { - int steplet = step >> 8; - - if (get_bits_left(&s->gb) <= 0) - return AVERROR_INVALIDDATA; - - if (!get_bits1(&s->gb)) { - av_assert0(steplet >= 0); - - if (steplet > 0) { - bits[x ].bit = dominant; - bits[x++].count = steplet; - } - - if (!dominant) - n_zeros += steplet; - - if (step > INT32_MAX*8LL/9 + 1) - return AVERROR_INVALIDDATA; - step += step / 8; - } else if (steplet > 0) { - int actual_run = read_uint_max(s, steplet - 1); - - av_assert0(actual_run >= 0); - - if (actual_run > 0) { - bits[x ].bit = dominant; - bits[x++].count = actual_run; - } - - bits[x ].bit = !dominant; - bits[x++].count = 1; - - if (!dominant) - n_zeros += actual_run; - else - n_zeros++; - - step -= step / 8; - } - - if (step < 256) { - step = 65536 / step; - dominant = !dominant; - } - } - - max_x = x; - x = 0; - n_zeros = 0; - for (i = 0; n_zeros < entries; i++) { - if (x >= max_x) - return AVERROR_INVALIDDATA; - - if (pos >= entries) { - pos = 0; - level += passes << low_bits; - passes = 1; - if (bits[x].bit && bits[x].count > entries - n_zeros) - passes = bits[x].count / (entries - n_zeros); - } - - if (level > 1 << 16) - return AVERROR_INVALIDDATA; - - if (buf[pos] >= level) { - if (bits[x].bit) - buf[pos] += passes << low_bits; - else - n_zeros++; - - av_assert1(bits[x].count >= passes); - bits[x].count -= passes; - x += bits[x].count == 0; - } - - pos++; - } - - for (i = 0; i < entries; i++) { - if (buf[i] && get_bits1(&s->gb)) { - buf[i] = -buf[i]; - } - } - - return 0; -} - -static inline int shift_down(int a, int b) -{ - return (a >> b) + (a < 0); -} - -static inline int shift(int a, int b) -{ - return a + (1 << b - 1) >> b; -} - -#define LATTICE_SHIFT 10 -#define 
SAMPLE_SHIFT 4 -#define SAMPLE_FACTOR (1 << SAMPLE_SHIFT) - -static int predictor_calc_error(int *k, int *state, int order, int error) -{ - int i, x = error - shift_down(k[order-1] * state[order-1], LATTICE_SHIFT); - int *k_ptr = &(k[order-2]), - *state_ptr = &(state[order-2]); - - for (i = order-2; i >= 0; i--, k_ptr--, state_ptr--) { - unsigned k_value = *k_ptr, state_value = *state_ptr; - - x -= shift_down(k_value * state_value, LATTICE_SHIFT); - state_ptr[1] = state_value + shift_down(k_value * x, LATTICE_SHIFT); - } - - // don't drift too far, to avoid overflows - x = av_clip(x, -(SAMPLE_FACTOR << 16), SAMPLE_FACTOR << 16); - - state[0] = x; - - return x; -} - -static void predictor_init_state(int *k, unsigned *state, int order) -{ - for (int i = order - 2; i >= 0; i--) { - unsigned x = state[i]; - - for (int j = 0, p = i + 1; p < order; j++, p++) { - int tmp = x + shift_down(k[j] * state[p], LATTICE_SHIFT); - - state[p] += shift_down(k[j] * x, LATTICE_SHIFT); - x = tmp; - } - } -} - -static int bonk_decode(AVCodecContext *avctx, AVFrame *frame, - int *got_frame_ptr, AVPacket *pkt) -{ - BonkContext *s = avctx->priv_data; - GetBitContext *gb = &s->gb; - const uint8_t *buf; - int quant, n, buf_size, input_buf_size; - int ret = AVERROR_INVALIDDATA; - - if ((!pkt->size && !s->bitstream_size) || s->nb_samples == 0) { - *got_frame_ptr = 0; - return pkt->size; - } - - buf_size = FFMIN(pkt->size, s->max_framesize - s->bitstream_size); - input_buf_size = buf_size; - if (s->bitstream_index + s->bitstream_size + buf_size + AV_INPUT_BUFFER_PADDING_SIZE > s->max_framesize) { - memmove(s->bitstream, &s->bitstream[s->bitstream_index], s->bitstream_size); - s->bitstream_index = 0; - } - if (pkt->data) - memcpy(&s->bitstream[s->bitstream_index + s->bitstream_size], pkt->data, buf_size); - buf = &s->bitstream[s->bitstream_index]; - buf_size += s->bitstream_size; - s->bitstream_size = buf_size; - if (buf_size < s->max_framesize && pkt->data) { - *got_frame_ptr = 0; - return input_buf_size; - } - - frame->nb_samples = FFMIN(s->samples_per_packet * s->down_sampling, s->nb_samples); - if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) - goto fail; - - if ((ret = init_get_bits8(gb, buf, buf_size)) < 0) - goto fail; - - skip_bits(gb, s->skip); - if ((ret = intlist_read(s, s->k, s->n_taps, 0)) < 0) - goto fail; - - for (int i = 0; i < s->n_taps; i++) - s->k[i] *= s->quant[i]; - quant = s->lossless ? 
1 : get_bits(&s->gb, 16) * SAMPLE_FACTOR; - - for (int ch = 0; ch < avctx->ch_layout.nb_channels; ch++) { - const int samples_per_packet = s->samples_per_packet; - const int down_sampling = s->down_sampling; - const int offset = samples_per_packet * down_sampling - 1; - int *state = s->state[ch]; - int *sample = s->samples[ch]; - - predictor_init_state(s->k, state, s->n_taps); - if ((ret = intlist_read(s, s->input_samples, samples_per_packet, 1)) < 0) - goto fail; - - for (int i = 0; i < samples_per_packet; i++) { - for (int j = 0; j < s->down_sampling - 1; j++) { - sample[0] = predictor_calc_error(s->k, state, s->n_taps, 0); - sample++; - } - - sample[0] = predictor_calc_error(s->k, state, s->n_taps, s->input_samples[i] * (unsigned)quant); - sample++; - } - - sample = s->samples[ch]; - for (int i = 0; i < s->n_taps; i++) - state[i] = sample[offset - i]; - } - - if (s->mid_side && avctx->ch_layout.nb_channels == 2) { - for (int i = 0; i < frame->nb_samples; i++) { - s->samples[1][i] += shift(s->samples[0][i], 1); - s->samples[0][i] -= s->samples[1][i]; - } - } - - if (!s->lossless) { - for (int ch = 0; ch < avctx->ch_layout.nb_channels; ch++) { - int *samples = s->samples[ch]; - for (int i = 0; i < frame->nb_samples; i++) - samples[i] = shift(samples[i], 4); - } - } - - for (int ch = 0; ch < avctx->ch_layout.nb_channels; ch++) { - int16_t *osamples = (int16_t *)frame->extended_data[ch]; - int *samples = s->samples[ch]; - for (int i = 0; i < frame->nb_samples; i++) - osamples[i] = av_clip_int16(samples[i]); - } - - s->nb_samples -= frame->nb_samples; - - s->skip = get_bits_count(gb) - 8 * (get_bits_count(gb) / 8); - n = get_bits_count(gb) / 8; - - if (n > buf_size) { -fail: - s->bitstream_size = 0; - s->bitstream_index = 0; - return AVERROR_INVALIDDATA; - } - - *got_frame_ptr = 1; - - if (s->bitstream_size) { - s->bitstream_index += n; - s->bitstream_size -= n; - return input_buf_size; - } - return n; -} - -const FFCodec ff_bonk_decoder = { - .p.name = "bonk", - CODEC_LONG_NAME("Bonk audio"), - .p.type = AVMEDIA_TYPE_AUDIO, - .p.id = AV_CODEC_ID_BONK, - .priv_data_size = sizeof(BonkContext), - .init = bonk_init, - FF_CODEC_DECODE_CB(bonk_decode), - .close = bonk_close, - .p.capabilities = AV_CODEC_CAP_DELAY | - AV_CODEC_CAP_DR1 | - AV_CODEC_CAP_SUBFRAMES, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, - .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P, - AV_SAMPLE_FMT_NONE }, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cdgraphics.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cdgraphics.c deleted file mode 100644 index 431e99cd762134ac03450f277cdb832a70aff527..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cdgraphics.c +++ /dev/null @@ -1,401 +0,0 @@ -/* - * CD Graphics Video Decoder - * Copyright (c) 2009 Michael Tison - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "avcodec.h" -#include "bytestream.h" -#include "codec_internal.h" -#include "decode.h" - -/** - * @file - * @brief CD Graphics Video Decoder - * @author Michael Tison - * @see http://wiki.multimedia.cx/index.php?title=CD_Graphics - * @see http://www.ccs.neu.edu/home/bchafy/cdb/info/cdg - */ - -/// default screen sizes -#define CDG_FULL_WIDTH 300 -#define CDG_FULL_HEIGHT 216 -#define CDG_DISPLAY_WIDTH 294 -#define CDG_DISPLAY_HEIGHT 204 -#define CDG_BORDER_WIDTH 6 -#define CDG_BORDER_HEIGHT 12 - -/// masks -#define CDG_COMMAND 0x09 -#define CDG_MASK 0x3F - -/// instruction codes -#define CDG_INST_MEMORY_PRESET 1 -#define CDG_INST_BORDER_PRESET 2 -#define CDG_INST_TILE_BLOCK 6 -#define CDG_INST_SCROLL_PRESET 20 -#define CDG_INST_SCROLL_COPY 24 -#define CDG_INST_TRANSPARENT_COL 28 -#define CDG_INST_LOAD_PAL_LO 30 -#define CDG_INST_LOAD_PAL_HIGH 31 -#define CDG_INST_TILE_BLOCK_XOR 38 - -/// data sizes -#define CDG_PACKET_SIZE 24 -#define CDG_DATA_SIZE 16 -#define CDG_TILE_HEIGHT 12 -#define CDG_TILE_WIDTH 6 -#define CDG_MINIMUM_PKT_SIZE 6 -#define CDG_MINIMUM_SCROLL_SIZE 3 -#define CDG_HEADER_SIZE 8 -#define CDG_PALETTE_SIZE 16 - -typedef struct CDGraphicsContext { - AVFrame *frame; - int hscroll; - int vscroll; - uint8_t alpha[CDG_PALETTE_SIZE]; - int cleared; -} CDGraphicsContext; - -static av_cold int cdg_decode_init(AVCodecContext *avctx) -{ - CDGraphicsContext *cc = avctx->priv_data; - - cc->frame = av_frame_alloc(); - if (!cc->frame) - return AVERROR(ENOMEM); - - for (int i = 0; i < CDG_PALETTE_SIZE; i++) - cc->alpha[i] = 0xFFU; - - avctx->pix_fmt = AV_PIX_FMT_PAL8; - return ff_set_dimensions(avctx, CDG_FULL_WIDTH, CDG_FULL_HEIGHT); -} - -static void cdg_border_preset(CDGraphicsContext *cc, uint8_t *data) -{ - int y; - int lsize = cc->frame->linesize[0]; - uint8_t *buf = cc->frame->data[0]; - int color = data[0] & 0x0F; - - if (!(data[1] & 0x0F)) { - /// fill the top and bottom borders - memset(buf, color, CDG_BORDER_HEIGHT * lsize); - memset(buf + (CDG_FULL_HEIGHT - CDG_BORDER_HEIGHT) * lsize, - color, CDG_BORDER_HEIGHT * lsize); - - /// fill the side borders - for (y = CDG_BORDER_HEIGHT; y < CDG_FULL_HEIGHT - CDG_BORDER_HEIGHT; y++) { - memset(buf + y * lsize, color, CDG_BORDER_WIDTH); - memset(buf + CDG_FULL_WIDTH - CDG_BORDER_WIDTH + y * lsize, - color, CDG_BORDER_WIDTH); - } - } -} - -static void cdg_load_palette(CDGraphicsContext *cc, uint8_t *data, int low) -{ - uint8_t r, g, b; - uint16_t color; - int i; - int array_offset = low ? 
0 : 8; - uint32_t *palette = (uint32_t *) cc->frame->data[1]; - - for (i = 0; i < 8; i++) { - color = (data[2 * i] << 6) + (data[2 * i + 1] & 0x3F); - r = ((color >> 8) & 0x000F) * 17; - g = ((color >> 4) & 0x000F) * 17; - b = ((color ) & 0x000F) * 17; - palette[i + array_offset] = (uint32_t)cc->alpha[i + array_offset] << 24 | r << 16 | g << 8 | b; - } - cc->frame->palette_has_changed = 1; -} - -static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b) -{ - unsigned ci, ri; - int color; - int x, y; - int ai; - int stride = cc->frame->linesize[0]; - uint8_t *buf = cc->frame->data[0]; - - ri = (data[2] & 0x1F) * CDG_TILE_HEIGHT + cc->vscroll; - ci = (data[3] & 0x3F) * CDG_TILE_WIDTH + cc->hscroll; - - if (ri > (CDG_FULL_HEIGHT - CDG_TILE_HEIGHT)) - return AVERROR(EINVAL); - if (ci > (CDG_FULL_WIDTH - CDG_TILE_WIDTH)) - return AVERROR(EINVAL); - - for (y = 0; y < CDG_TILE_HEIGHT; y++) { - for (x = 0; x < CDG_TILE_WIDTH; x++) { - if (!((data[4 + y] >> (5 - x)) & 0x01)) - color = data[0] & 0x0F; - else - color = data[1] & 0x0F; - - ai = ci + x + (stride * (ri + y)); - if (b) - color ^= buf[ai]; - buf[ai] = color; - } - } - - return 0; -} - -#define UP 2 -#define DOWN 1 -#define LEFT 2 -#define RIGHT 1 - -static void cdg_copy_rect_buf(int out_tl_x, int out_tl_y, uint8_t *out, - int in_tl_x, int in_tl_y, uint8_t *in, - int w, int h, int stride) -{ - int y; - - in += in_tl_x + in_tl_y * stride; - out += out_tl_x + out_tl_y * stride; - for (y = 0; y < h; y++) - memcpy(out + y * stride, in + y * stride, w); -} - -static void cdg_fill_rect_preset(int tl_x, int tl_y, uint8_t *out, - int color, int w, int h, int stride) -{ - int y; - - for (y = tl_y; y < tl_y + h; y++) - memset(out + tl_x + y * stride, color, w); -} - -static void cdg_fill_wrapper(int out_tl_x, int out_tl_y, uint8_t *out, - int in_tl_x, int in_tl_y, uint8_t *in, - int color, int w, int h, int stride, int roll) -{ - if (roll) { - cdg_copy_rect_buf(out_tl_x, out_tl_y, out, in_tl_x, in_tl_y, - in, w, h, stride); - } else { - cdg_fill_rect_preset(out_tl_x, out_tl_y, out, color, w, h, stride); - } -} - -static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data, - AVFrame *new_frame, int roll_over) -{ - int color; - int hscmd, h_off, hinc, vscmd, v_off, vinc; - int y; - int stride = cc->frame->linesize[0]; - uint8_t *in = cc->frame->data[0]; - uint8_t *out = new_frame->data[0]; - - color = data[0] & 0x0F; - hscmd = (data[1] & 0x30) >> 4; - vscmd = (data[2] & 0x30) >> 4; - - h_off = FFMIN(data[1] & 0x07, CDG_BORDER_WIDTH - 1); - v_off = FFMIN(data[2] & 0x0F, CDG_BORDER_HEIGHT - 1); - - /// find the difference and save the offset for cdg_tile_block usage - hinc = h_off - cc->hscroll; - vinc = cc->vscroll - v_off; - cc->hscroll = h_off; - cc->vscroll = v_off; - - if (vscmd == UP) - vinc -= 12; - if (vscmd == DOWN) - vinc += 12; - if (hscmd == LEFT) - hinc -= 6; - if (hscmd == RIGHT) - hinc += 6; - - if (!hinc && !vinc) - return; - - memcpy(new_frame->data[1], cc->frame->data[1], CDG_PALETTE_SIZE * 4); - - for (y = FFMAX(0, vinc); y < FFMIN(CDG_FULL_HEIGHT + vinc, CDG_FULL_HEIGHT); y++) - memcpy(out + FFMAX(0, hinc) + stride * y, - in + FFMAX(0, hinc) - hinc + (y - vinc) * stride, - FFABS(stride) - FFABS(hinc)); - - if (vinc > 0) - cdg_fill_wrapper(0, 0, out, - 0, CDG_FULL_HEIGHT - vinc, in, color, - stride, vinc, stride, roll_over); - else if (vinc < 0) - cdg_fill_wrapper(0, CDG_FULL_HEIGHT + vinc, out, - 0, 0, in, color, - stride, -1 * vinc, stride, roll_over); - - if (hinc > 0) - cdg_fill_wrapper(0, 0, out, - CDG_FULL_WIDTH - 
hinc, 0, in, color, - hinc, CDG_FULL_HEIGHT, stride, roll_over); - else if (hinc < 0) - cdg_fill_wrapper(CDG_FULL_WIDTH + hinc, 0, out, - 0, 0, in, color, - -1 * hinc, CDG_FULL_HEIGHT, stride, roll_over); - -} - -static int cdg_decode_frame(AVCodecContext *avctx, AVFrame *frame, - int *got_frame, AVPacket *avpkt) -{ - GetByteContext gb; - int buf_size = avpkt->size; - int ret; - uint8_t command, inst; - uint8_t cdg_data[CDG_DATA_SIZE] = {0}; - CDGraphicsContext *cc = avctx->priv_data; - - if (buf_size < CDG_MINIMUM_PKT_SIZE) { - av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n"); - return AVERROR(EINVAL); - } - if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) { - av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n"); - return AVERROR(EINVAL); - } - - bytestream2_init(&gb, avpkt->data, avpkt->size); - - if ((ret = ff_reget_buffer(avctx, cc->frame, 0)) < 0) - return ret; - if (!cc->cleared) { - memset(cc->frame->data[0], 0, cc->frame->linesize[0] * avctx->height); - memset(cc->frame->data[1], 0, AVPALETTE_SIZE); - cc->cleared = 1; - } - - command = bytestream2_get_byte(&gb); - inst = bytestream2_get_byte(&gb); - inst &= CDG_MASK; - bytestream2_skip(&gb, 2); - bytestream2_get_buffer(&gb, cdg_data, sizeof(cdg_data)); - - if ((command & CDG_MASK) == CDG_COMMAND) { - switch (inst) { - case CDG_INST_MEMORY_PRESET: - if (!(cdg_data[1] & 0x0F)) - memset(cc->frame->data[0], cdg_data[0] & 0x0F, - cc->frame->linesize[0] * CDG_FULL_HEIGHT); - break; - case CDG_INST_LOAD_PAL_LO: - case CDG_INST_LOAD_PAL_HIGH: - if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) { - av_log(avctx, AV_LOG_ERROR, "buffer too small for loading palette\n"); - return AVERROR(EINVAL); - } - - cdg_load_palette(cc, cdg_data, inst == CDG_INST_LOAD_PAL_LO); - break; - case CDG_INST_BORDER_PRESET: - cdg_border_preset(cc, cdg_data); - break; - case CDG_INST_TILE_BLOCK_XOR: - case CDG_INST_TILE_BLOCK: - if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) { - av_log(avctx, AV_LOG_ERROR, "buffer too small for drawing tile\n"); - return AVERROR(EINVAL); - } - - ret = cdg_tile_block(cc, cdg_data, inst == CDG_INST_TILE_BLOCK_XOR); - if (ret) { - av_log(avctx, AV_LOG_ERROR, "tile is out of range\n"); - return ret; - } - break; - case CDG_INST_SCROLL_PRESET: - case CDG_INST_SCROLL_COPY: - if (buf_size - CDG_HEADER_SIZE < CDG_MINIMUM_SCROLL_SIZE) { - av_log(avctx, AV_LOG_ERROR, "buffer too small for scrolling\n"); - return AVERROR(EINVAL); - } - - if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) - return ret; - - cdg_scroll(cc, cdg_data, frame, inst == CDG_INST_SCROLL_COPY); - av_frame_unref(cc->frame); - ret = av_frame_ref(cc->frame, frame); - if (ret < 0) - return ret; - break; - case CDG_INST_TRANSPARENT_COL: - for (int i = 0; i < CDG_PALETTE_SIZE; i++) - cc->alpha[i] = 255 - ((cdg_data[i] & 0x3f) << 2); - break; - default: - break; - } - - if (!frame->data[0]) { - ret = av_frame_ref(frame, cc->frame); - if (ret < 0) - return ret; - } - *got_frame = 1; - } else { - *got_frame = 0; - } - - return avpkt->size; -} - -static void cdg_decode_flush(AVCodecContext *avctx) -{ - CDGraphicsContext *cc = avctx->priv_data; - - if (!cc->frame->data[0]) - return; - - memset(cc->frame->data[0], 0, cc->frame->linesize[0] * avctx->height); - if (!avctx->frame_num) - memset(cc->frame->data[1], 0, AVPALETTE_SIZE); -} - -static av_cold int cdg_decode_end(AVCodecContext *avctx) -{ - CDGraphicsContext *cc = avctx->priv_data; - - av_frame_free(&cc->frame); - - return 0; -} - -const FFCodec ff_cdgraphics_decoder = { - .p.name = 
"cdgraphics", - CODEC_LONG_NAME("CD Graphics video"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_CDGRAPHICS, - .priv_data_size = sizeof(CDGraphicsContext), - .init = cdg_decode_init, - .close = cdg_decode_end, - FF_CODEC_DECODE_CB(cdg_decode_frame), - .flush = cdg_decode_flush, - .p.capabilities = AV_CODEC_CAP_DR1, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/ac3dsp_mips.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/ac3dsp_mips.c deleted file mode 100644 index a5eaaf8eb2b67a5ac99684e85376d83b3ada0aba..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/ac3dsp_mips.c +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2012 - * MIPS Technologies, Inc., California. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * Authors: Branimir Vasic (bvasic@mips.com) - * Nedeljko Babic (nbabic@mips.com) - * - * Various AC-3 DSP Utils optimized for MIPS - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Reference: libavcodec/ac3dsp.c - */ - -#include "config.h" -#include "libavcodec/ac3dsp.h" -#include "libavcodec/ac3.h" -#include "libavcodec/ac3tab.h" -#include "libavutil/mips/asmdefs.h" - -#if HAVE_INLINE_ASM -#if HAVE_MIPSDSP -static void ac3_bit_alloc_calc_bap_mips(int16_t *mask, int16_t *psd, - int start, int end, - int snr_offset, int floor, - const uint8_t *bap_tab, uint8_t *bap) -{ - int band, band_end, cond; - int m, address1, address2; - int16_t *psd1, *psd_end; - uint8_t *bap1; - - if (snr_offset == -960) { - memset(bap, 0, AC3_MAX_COEFS); - return; - } - - psd1 = &psd[start]; - bap1 = &bap[start]; - band = ff_ac3_bin_to_band_tab[start]; - - do { - m = (FFMAX(mask[band] - snr_offset - floor, 0) & 0x1FE0) + floor; - band_end = ff_ac3_band_start_tab[++band]; - band_end = FFMIN(band_end, end); - psd_end = psd + band_end - 1; - - __asm__ volatile ( - "slt %[cond], %[psd1], %[psd_end] \n\t" - "beqz %[cond], 1f \n\t" - "2: \n\t" - "lh %[address1], 0(%[psd1]) \n\t" - "lh %[address2], 2(%[psd1]) \n\t" - PTR_ADDIU " %[psd1], %[psd1], 4 \n\t" - "subu %[address1], %[address1], %[m] \n\t" - "sra %[address1], %[address1], 5 \n\t" - "addiu %[address1], %[address1], -32 \n\t" - "shll_s.w %[address1], %[address1], 26 \n\t" - "subu %[address2], %[address2], %[m] \n\t" - "sra %[address2], %[address2], 5 \n\t" - "sra %[address1], %[address1], 26 \n\t" - "addiu %[address1], %[address1], 32 \n\t" - "lbux %[address1], %[address1](%[bap_tab]) \n\t" - "addiu %[address2], %[address2], -32 \n\t" - "shll_s.w %[address2], %[address2], 26 \n\t" - "sb %[address1], 0(%[bap1]) \n\t" - "slt %[cond], %[psd1], %[psd_end] \n\t" - "sra %[address2], %[address2], 26 \n\t" - "addiu %[address2], %[address2], 32 \n\t" - "lbux %[address2], %[address2](%[bap_tab]) \n\t" - "sb %[address2], 1(%[bap1]) \n\t" - PTR_ADDIU " %[bap1], %[bap1], 2 \n\t" - "bnez %[cond], 2b \n\t" - PTR_ADDIU " %[psd_end], %[psd_end], 2 \n\t" - "slt %[cond], %[psd1], %[psd_end] \n\t" - "beqz %[cond], 3f \n\t" - "1: \n\t" - "lh %[address1], 0(%[psd1]) \n\t" - PTR_ADDIU " %[psd1], %[psd1], 2 \n\t" - "subu %[address1], %[address1], %[m] \n\t" - "sra %[address1], %[address1], 5 \n\t" - "addiu %[address1], %[address1], -32 \n\t" - "shll_s.w %[address1], %[address1], 26 \n\t" - "sra %[address1], %[address1], 26 \n\t" - "addiu %[address1], %[address1], 32 \n\t" - "lbux %[address1], %[address1](%[bap_tab]) \n\t" - "sb %[address1], 0(%[bap1]) \n\t" - PTR_ADDIU " %[bap1], %[bap1], 1 \n\t" - "3: \n\t" - - : [address1]"=&r"(address1), [address2]"=&r"(address2), - [cond]"=&r"(cond), [bap1]"+r"(bap1), - [psd1]"+r"(psd1), [psd_end]"+r"(psd_end) - : [m]"r"(m), [bap_tab]"r"(bap_tab) - : "memory" - ); - } while (end > band_end); -} - -static void ac3_update_bap_counts_mips(uint16_t mant_cnt[16], uint8_t *bap, - int len) -{ - void *temp0, *temp2, *temp4, *temp5, *temp6, *temp7; - int temp1, temp3; - - __asm__ volatile ( - "andi %[temp3], %[len], 3 \n\t" - PTR_ADDU "%[temp2], %[bap], %[len] \n\t" - PTR_ADDU "%[temp4], %[bap], %[temp3] \n\t" - "beq %[temp2], %[temp4], 4f \n\t" - "1: \n\t" - "lbu %[temp0], -1(%[temp2]) \n\t" - "lbu %[temp5], -2(%[temp2]) \n\t" - "lbu %[temp6], -3(%[temp2]) \n\t" - "sll %[temp0], %[temp0], 1 \n\t" - PTR_ADDU "%[temp0], %[mant_cnt], %[temp0] \n\t" - "sll %[temp5], %[temp5], 1 \n\t" - PTR_ADDU 
"%[temp5], %[mant_cnt], %[temp5] \n\t" - "lhu %[temp1], 0(%[temp0]) \n\t" - "sll %[temp6], %[temp6], 1 \n\t" - PTR_ADDU "%[temp6], %[mant_cnt], %[temp6] \n\t" - "addiu %[temp1], %[temp1], 1 \n\t" - "sh %[temp1], 0(%[temp0]) \n\t" - "lhu %[temp1], 0(%[temp5]) \n\t" - "lbu %[temp7], -4(%[temp2]) \n\t" - PTR_ADDIU "%[temp2],%[temp2], -4 \n\t" - "addiu %[temp1], %[temp1], 1 \n\t" - "sh %[temp1], 0(%[temp5]) \n\t" - "lhu %[temp1], 0(%[temp6]) \n\t" - "sll %[temp7], %[temp7], 1 \n\t" - PTR_ADDU "%[temp7], %[mant_cnt], %[temp7] \n\t" - "addiu %[temp1], %[temp1],1 \n\t" - "sh %[temp1], 0(%[temp6]) \n\t" - "lhu %[temp1], 0(%[temp7]) \n\t" - "addiu %[temp1], %[temp1], 1 \n\t" - "sh %[temp1], 0(%[temp7]) \n\t" - "bne %[temp2], %[temp4], 1b \n\t" - "4: \n\t" - "beqz %[temp3], 2f \n\t" - "3: \n\t" - "addiu %[temp3], %[temp3], -1 \n\t" - "lbu %[temp0], -1(%[temp2]) \n\t" - PTR_ADDIU "%[temp2],%[temp2], -1 \n\t" - "sll %[temp0], %[temp0], 1 \n\t" - PTR_ADDU "%[temp0], %[mant_cnt], %[temp0] \n\t" - "lhu %[temp1], 0(%[temp0]) \n\t" - "addiu %[temp1], %[temp1], 1 \n\t" - "sh %[temp1], 0(%[temp0]) \n\t" - "bgtz %[temp3], 3b \n\t" - "2: \n\t" - - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [temp4] "=&r" (temp4), [temp5] "=&r" (temp5), - [temp6] "=&r" (temp6), [temp7] "=&r" (temp7) - : [len] "r" (len), [bap] "r" (bap), - [mant_cnt] "r" (mant_cnt) - : "memory" - ); -} -#endif - -#if HAVE_MIPSFPU -#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6 -static void float_to_fixed24_mips(int32_t *dst, const float *src, unsigned int len) -{ - const float scale = 1 << 24; - float src0, src1, src2, src3, src4, src5, src6, src7; - int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; - - do { - __asm__ volatile ( - "lwc1 %[src0], 0(%[src]) \n\t" - "lwc1 %[src1], 4(%[src]) \n\t" - "lwc1 %[src2], 8(%[src]) \n\t" - "lwc1 %[src3], 12(%[src]) \n\t" - "lwc1 %[src4], 16(%[src]) \n\t" - "lwc1 %[src5], 20(%[src]) \n\t" - "lwc1 %[src6], 24(%[src]) \n\t" - "lwc1 %[src7], 28(%[src]) \n\t" - "mul.s %[src0], %[src0], %[scale] \n\t" - "mul.s %[src1], %[src1], %[scale] \n\t" - "mul.s %[src2], %[src2], %[scale] \n\t" - "mul.s %[src3], %[src3], %[scale] \n\t" - "mul.s %[src4], %[src4], %[scale] \n\t" - "mul.s %[src5], %[src5], %[scale] \n\t" - "mul.s %[src6], %[src6], %[scale] \n\t" - "mul.s %[src7], %[src7], %[scale] \n\t" - "cvt.w.s %[src0], %[src0] \n\t" - "cvt.w.s %[src1], %[src1] \n\t" - "cvt.w.s %[src2], %[src2] \n\t" - "cvt.w.s %[src3], %[src3] \n\t" - "cvt.w.s %[src4], %[src4] \n\t" - "cvt.w.s %[src5], %[src5] \n\t" - "cvt.w.s %[src6], %[src6] \n\t" - "cvt.w.s %[src7], %[src7] \n\t" - "mfc1 %[temp0], %[src0] \n\t" - "mfc1 %[temp1], %[src1] \n\t" - "mfc1 %[temp2], %[src2] \n\t" - "mfc1 %[temp3], %[src3] \n\t" - "mfc1 %[temp4], %[src4] \n\t" - "mfc1 %[temp5], %[src5] \n\t" - "mfc1 %[temp6], %[src6] \n\t" - "mfc1 %[temp7], %[src7] \n\t" - "sw %[temp0], 0(%[dst]) \n\t" - "sw %[temp1], 4(%[dst]) \n\t" - "sw %[temp2], 8(%[dst]) \n\t" - "sw %[temp3], 12(%[dst]) \n\t" - "sw %[temp4], 16(%[dst]) \n\t" - "sw %[temp5], 20(%[dst]) \n\t" - "sw %[temp6], 24(%[dst]) \n\t" - "sw %[temp7], 28(%[dst]) \n\t" - - : [dst] "+r" (dst), [src] "+r" (src), - [src0] "=&f" (src0), [src1] "=&f" (src1), - [src2] "=&f" (src2), [src3] "=&f" (src3), - [src4] "=&f" (src4), [src5] "=&f" (src5), - [src6] "=&f" (src6), [src7] "=&f" (src7), - [temp0] "=r" (temp0), [temp1] "=r" (temp1), - [temp2] "=r" (temp2), [temp3] "=r" (temp3), - [temp4] "=r" (temp4), [temp5] "=r" (temp5), - [temp6] "=r" (temp6), [temp7] "=r" (temp7) - : 
[scale] "f" (scale) - : "memory" - ); - src = src + 8; - dst = dst + 8; - len -= 8; - } while (len > 0); -} - -static void ac3_downmix_mips(float **samples, float (*matrix)[2], - int out_ch, int in_ch, int len) -{ - int i, j, i1, i2, i3; - float v0, v1, v2, v3; - float v4, v5, v6, v7; - float samples0, samples1, samples2, samples3, matrix_j, matrix_j2; - float *samples_p, *samples_sw, *matrix_p, **samples_x, **samples_end; - - __asm__ volatile( - ".set push \n\t" - ".set noreorder \n\t" - - "li %[i1], 2 \n\t" - "sll %[len], 2 \n\t" - "move %[i], $zero \n\t" - "sll %[j], %[in_ch], " PTRLOG " \n\t" - - "bne %[out_ch], %[i1], 3f \n\t" // if (out_ch == 2) - " li %[i2], 1 \n\t" - - "2: \n\t" // start of the for loop (for (i = 0; i < len; i+=4)) - "move %[matrix_p], %[matrix] \n\t" - "move %[samples_x], %[samples] \n\t" - "mtc1 $zero, %[v0] \n\t" - "mtc1 $zero, %[v1] \n\t" - "mtc1 $zero, %[v2] \n\t" - "mtc1 $zero, %[v3] \n\t" - "mtc1 $zero, %[v4] \n\t" - "mtc1 $zero, %[v5] \n\t" - "mtc1 $zero, %[v6] \n\t" - "mtc1 $zero, %[v7] \n\t" - "addiu %[i1], %[i], 4 \n\t" - "addiu %[i2], %[i], 8 \n\t" - PTR_L " %[samples_p], 0(%[samples_x]) \n\t" - "addiu %[i3], %[i], 12 \n\t" - PTR_ADDU "%[samples_end],%[samples_x], %[j] \n\t" - "move %[samples_sw], %[samples_p] \n\t" - - "1: \n\t" // start of the inner for loop (for (j = 0; j < in_ch; j++)) - "lwc1 %[matrix_j], 0(%[matrix_p]) \n\t" - "lwc1 %[matrix_j2], 4(%[matrix_p]) \n\t" - "lwxc1 %[samples0], %[i](%[samples_p]) \n\t" - "lwxc1 %[samples1], %[i1](%[samples_p]) \n\t" - "lwxc1 %[samples2], %[i2](%[samples_p]) \n\t" - "lwxc1 %[samples3], %[i3](%[samples_p]) \n\t" - PTR_ADDIU "%[matrix_p], 8 \n\t" - PTR_ADDIU "%[samples_x]," PTRSIZE " \n\t" - "madd.s %[v0], %[v0], %[samples0], %[matrix_j] \n\t" - "madd.s %[v1], %[v1], %[samples1], %[matrix_j] \n\t" - "madd.s %[v2], %[v2], %[samples2], %[matrix_j] \n\t" - "madd.s %[v3], %[v3], %[samples3], %[matrix_j] \n\t" - "madd.s %[v4], %[v4], %[samples0], %[matrix_j2]\n\t" - "madd.s %[v5], %[v5], %[samples1], %[matrix_j2]\n\t" - "madd.s %[v6], %[v6], %[samples2], %[matrix_j2]\n\t" - "madd.s %[v7], %[v7], %[samples3], %[matrix_j2]\n\t" - "bne %[samples_x], %[samples_end], 1b \n\t" - PTR_L " %[samples_p], 0(%[samples_x]) \n\t" - - PTR_L " %[samples_p], " PTRSIZE "(%[samples]) \n\t" - "swxc1 %[v0], %[i](%[samples_sw]) \n\t" - "swxc1 %[v1], %[i1](%[samples_sw]) \n\t" - "swxc1 %[v2], %[i2](%[samples_sw]) \n\t" - "swxc1 %[v3], %[i3](%[samples_sw]) \n\t" - "swxc1 %[v4], %[i](%[samples_p]) \n\t" - "addiu %[i], 16 \n\t" - "swxc1 %[v5], %[i1](%[samples_p]) \n\t" - "swxc1 %[v6], %[i2](%[samples_p]) \n\t" - "bne %[i], %[len], 2b \n\t" - " swxc1 %[v7], %[i3](%[samples_p]) \n\t" - - "3: \n\t" - "bne %[out_ch], %[i2], 6f \n\t" // if (out_ch == 1) - " nop \n\t" - - "5: \n\t" // start of the outer for loop (for (i = 0; i < len; i+=4)) - "move %[matrix_p], %[matrix] \n\t" - "move %[samples_x], %[samples] \n\t" - "mtc1 $zero, %[v0] \n\t" - "mtc1 $zero, %[v1] \n\t" - "mtc1 $zero, %[v2] \n\t" - "mtc1 $zero, %[v3] \n\t" - "addiu %[i1], %[i], 4 \n\t" - "addiu %[i2], %[i], 8 \n\t" - PTR_L " %[samples_p], 0(%[samples_x]) \n\t" - "addiu %[i3], %[i], 12 \n\t" - PTR_ADDU "%[samples_end],%[samples_x], %[j] \n\t" - "move %[samples_sw], %[samples_p] \n\t" - - "4: \n\t" // start of the inner for loop (for (j = 0; j < in_ch; j++)) - "lwc1 %[matrix_j], 0(%[matrix_p]) \n\t" - "lwxc1 %[samples0], %[i](%[samples_p]) \n\t" - "lwxc1 %[samples1], %[i1](%[samples_p]) \n\t" - "lwxc1 %[samples2], %[i2](%[samples_p]) \n\t" - "lwxc1 %[samples3], 
%[i3](%[samples_p]) \n\t" - PTR_ADDIU "%[matrix_p], 8 \n\t" - PTR_ADDIU "%[samples_x]," PTRSIZE " \n\t" - "madd.s %[v0], %[v0], %[samples0], %[matrix_j] \n\t" - "madd.s %[v1], %[v1], %[samples1], %[matrix_j] \n\t" - "madd.s %[v2], %[v2], %[samples2], %[matrix_j] \n\t" - "madd.s %[v3], %[v3], %[samples3], %[matrix_j] \n\t" - "bne %[samples_x], %[samples_end], 4b \n\t" - PTR_L " %[samples_p], 0(%[samples_x]) \n\t" - - "swxc1 %[v0], %[i](%[samples_sw]) \n\t" - "addiu %[i], 16 \n\t" - "swxc1 %[v1], %[i1](%[samples_sw]) \n\t" - "swxc1 %[v2], %[i2](%[samples_sw]) \n\t" - "bne %[i], %[len], 5b \n\t" - " swxc1 %[v3], %[i3](%[samples_sw]) \n\t" - "6: \n\t" - - ".set pop" - :[samples_p]"=&r"(samples_p), [matrix_j]"=&f"(matrix_j), [matrix_j2]"=&f"(matrix_j2), - [samples0]"=&f"(samples0), [samples1]"=&f"(samples1), - [samples2]"=&f"(samples2), [samples3]"=&f"(samples3), - [v0]"=&f"(v0), [v1]"=&f"(v1), [v2]"=&f"(v2), [v3]"=&f"(v3), - [v4]"=&f"(v4), [v5]"=&f"(v5), [v6]"=&f"(v6), [v7]"=&f"(v7), - [samples_x]"=&r"(samples_x), [matrix_p]"=&r"(matrix_p), - [samples_end]"=&r"(samples_end), [samples_sw]"=&r"(samples_sw), - [i1]"=&r"(i1), [i2]"=&r"(i2), [i3]"=&r"(i3), [i]"=&r"(i), - [j]"=&r"(j), [len]"+r"(len) - :[samples]"r"(samples), [matrix]"r"(matrix), - [in_ch]"r"(in_ch), [out_ch]"r"(out_ch) - :"memory" - ); -} -#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */ -#endif /* HAVE_MIPSFPU */ -#endif /* HAVE_INLINE_ASM */ - -void ff_ac3dsp_init_mips(AC3DSPContext *c) -{ -#if HAVE_INLINE_ASM -#if HAVE_MIPSDSP - c->bit_alloc_calc_bap = ac3_bit_alloc_calc_bap_mips; - c->update_bap_counts = ac3_update_bap_counts_mips; -#endif -#if HAVE_MIPSFPU -#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6 - c->float_to_fixed24 = float_to_fixed24_mips; - //c->downmix = ac3_downmix_mips; -#endif -#endif - -#endif -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/Airport Security Mod APK A Free-to-Play Game with Lots of Features and Challenges.md b/spaces/congsaPfin/Manga-OCR/logs/Airport Security Mod APK A Free-to-Play Game with Lots of Features and Challenges.md deleted file mode 100644 index 3c9b56bcd283fc024852513c7c4d4ef2fe1f223f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Airport Security Mod APK A Free-to-Play Game with Lots of Features and Challenges.md +++ /dev/null @@ -1,83 +0,0 @@ -
-

Airport Security Mod APK Download: A Fun and Free Airport Security Sim

-

Have you ever wondered what it's like to work as an airport security officer? Do you want to experience the thrill and challenge of keeping the passengers and the airport safe from threats? If so, then you should try Airport Security, a free-to-play game that lets you manage an airport security checkpoint. And if you want to make the game even more fun and exciting, you should download Airport Security Mod APK, a modified version of the game that gives you unlimited money and access to all features. In this article, we will tell you more about Airport Security and Airport Security Mod APK, and how you can download and install it on your Android device.

-

What is Airport Security?

-

Airport Security is a game that simulates the work of an airport security officer. You are in charge of screening the passengers and their luggage, checking their documents, scanning them for metal objects, explosives, or other prohibited items, and deciding whether to let them pass or detain them for further inspection. You have to be fast, accurate, and vigilant, as you will face different scenarios, such as angry passengers, suspicious behavior, false alarms, or even terrorist attacks. You also have to balance your budget, since your staff, equipment, and upgrades all cost money.

-

airport security mod apk download


Download Zip ☆☆☆ https://urlca.com/2uOa7H



-

Airport Security is a game that tests your skills, judgment, and nerves. You have to deal with different levels of difficulty, from easy to hard, and different types of airports, from small to large. You also have to unlock and upgrade various features, such as new scanners, detectors, staff members, or security measures. You can also customize your checkpoint with different decorations, such as plants, posters, or flags. Airport Security is a game that offers you hours of fun and entertainment.

-

What is Airport Security Mod APK?

-

Airport Security Mod APK is a modified version of the original game that gives you some advantages and benefits. With Airport Security Mod APK, you get unlimited money that you can use to buy anything you want in the game. You also get access to all features, such as all levels, airports, scanners, detectors, staff members, security measures, and decorations. You don't have to worry about ads or in-app purchases either, as they are removed in this version.

-

Airport Security Mod APK lets you enjoy the game without any limitations or restrictions. You can play at your own pace and in your own style, without worrying about running out of money or missing out on any features. You can also experiment with different combinations of equipment and staff members, and see how they affect your performance and results. In short, the mod removes the grind and makes the game more fun and exciting.

-

How to Download and Install Airport Security Mod APK?

-

If you want to download and install Airport Security Mod APK on your Android device, you have to follow these simple steps:

-

Step 1: Find a reliable source for the mod apk file

-

There are many websites that offer mod apk files for various games, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you have to be careful and choose a reliable source for the Airport Security Mod APK file. One of the best sources that we recommend is [Modded-1.com], a website that provides high-quality and updated mod apk files for many popular games. You can download Airport Security Mod APK from this website by clicking on this [link].
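The [link] above is the article's recommendation, but no third-party host can be verified from here. If the download page publishes a checksum for the file, a sensible habit is to compare it against the file you actually received before installing. Below is a minimal Python sketch of that check; the filename and the expected hash are placeholders, not values taken from this article.

```python
import hashlib

APK_PATH = "airport-security-mod.apk"              # placeholder filename
EXPECTED_SHA256 = "paste-the-published-hash-here"  # placeholder value

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in 1 MB chunks so large APKs need not fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of(APK_PATH)
if actual == EXPECTED_SHA256.lower():
    print("Checksum matches the published value.")
else:
    print(f"Checksum mismatch ({actual}); do not install the file.")
```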

-


-

Step 2: Enable unknown sources on your device settings

-

Before you can install the Airport Security Mod APK file on your device, you have to enable installation from unknown sources. This is because the mod apk file does not come from the official Google Play Store, and your device will block it by default. On Android 7 and earlier, go to your device settings, then security, then toggle on the option that says "allow installation of apps from unknown sources". On Android 8.0 and later this is a per-app permission instead: go to Settings > Apps & notifications > Special app access > Install unknown apps, then allow it for the browser or file manager you will use. This will let you install the mod apk file without any problems.

-

Step 3: Download the mod apk file and tap on it to install it

-

After you have enabled unknown sources, you can proceed to download the Airport Security Mod APK file from the link that we provided above. The file size is about 60 MB, so it should not take too long to download. Once the download is complete, you can tap on the file to start the installation process. You may see a pop-up window that asks for your permission to install the app. Just tap on "install" and wait for a few seconds until the installation is done.
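As an aside, if you downloaded the file to a computer rather than to the phone, you can sideload it over USB with adb (Android Debug Bridge) instead of tapping the file on the device; this assumes USB debugging is enabled in the phone's developer options. A minimal Python sketch, with the filename as a placeholder:

```python
import subprocess

APK_PATH = "airport-security-mod.apk"  # placeholder: your downloaded file

# "adb install -r" replaces an already-installed copy; drop "-r" for a first install.
result = subprocess.run(
    ["adb", "install", "-r", APK_PATH],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```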

-

Step 4: Launch the game and enjoy the mod features

-

Once the installation is finished, you can launch the game by tapping on its icon on your home screen or app drawer. You will see a welcome screen that shows you the mod features that you have unlocked, such as unlimited money and all features. You can then start playing the game and enjoy the mod features. You can buy anything you want in the game, such as new scanners, detectors, staff members, security measures, or decorations. You can also access all levels, airports, and scenarios in the game. You can also play the game without any ads or interruptions.

-

Conclusion

-

Airport Security is a fun and free airport security sim that you can play on your Android device. It lets you experience the work of an airport security officer and challenges you to keep the passengers and the airport safe from threats. It also features different scenarios, levels, and upgrades that make the game more interesting and entertaining.

-

Airport Security Mod APK is a modified version of the game that gives you more money and features to enhance your gameplay. It lets you enjoy the game without any limitations or restrictions. It also lets you play the game without any ads or in-app purchases. It is easy to download and install on your device by following the steps above.

-

If you are looking for a fun and free airport security sim that you can play on your Android device, you should try Airport Security and Airport Security Mod APK. They are both great games that will keep you entertained for hours.

-

FAQs

-

Q: Is Airport Security Mod APK safe to use?

-

A: Yes, Airport Security Mod APK is safe to use as long as you download it from a reliable source like [Modded-1.com]. It does not contain any viruses, malware, or spyware that can harm your device or steal your personal information.

-

Q: Do I need to root my device to use Airport Security Mod APK?

-

A: No, you do not need to root your device to use Airport Security Mod APK. You just need to enable unknown sources on your device settings and follow the steps above to install it.

-

Q: Can I play Airport Security Mod APK online with other players?

-

A: No, Airport Security Mod APK is not an online game. It is a single-player game that does not require an internet connection to play.

-

Q: Can I update Airport Security Mod APK when there is a new version of the game?

-

A: Yes, you can update Airport Security Mod APK when there is a new version of the game. However, you have to download and install the new mod apk file from [Modded-1.com] again. You cannot update it from the Google Play Store.

-

Q: What are some other games like Airport Security that I can play?
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Anger of Stick 5 Zombie MOD - The Best Way to Experience the Game.md b/spaces/congsaPfin/Manga-OCR/logs/Anger of Stick 5 Zombie MOD - The Best Way to Experience the Game.md deleted file mode 100644 index 6a1be0e6a14c4dd3a21baf31b06719825d46daab..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Anger of Stick 5 Zombie MOD - The Best Way to Experience the Game.md +++ /dev/null @@ -1,125 +0,0 @@ - -

Download Mod Anger of Stick 5 Zombie: How to Enjoy the Game with Unlimited Money and Features

-

Are you a fan of stickman games? Do you love fighting zombies and saving the world? If yes, then you should try Anger of Stick 5: Zombie, one of the most popular and addictive games about stickmen and zombies. But wait, there's more! You can also download the mod version of Anger of Stick 5: Zombie, which gives you unlimited money and features to make the game even more fun and exciting. In this article, we will tell you everything you need to know about Anger of Stick 5: Zombie, how to download and install the mod apk, and how to play and enjoy the game with the mod version. Let's get started!

-

What is Anger of Stick 5: Zombie?

-

Anger of Stick 5: Zombie is a game developed by J-PARK, a Korean company that specializes in stickman games. It is the fifth installment in the Anger of Stick series, which has over 100 million downloads worldwide. The game is available for both Android and iOS devices, and it has been rated 4.4 out of 5 stars on Google Play Store.

-

download mod anger of stick 5 zombie


Download File >>> https://urlca.com/2uOcuo



-

The gameplay and features of Anger of Stick 5: Zombie

-

The game is a side-scrolling action game, where you control a stickman hero who has to fight against hordes of zombies that have invaded the city. You can use various weapons, such as guns, swords, axes, grenades, and even helicopters, to kill the zombies and save your friends. You can also upgrade your skills and abilities, such as health, power, speed, and defense, to become stronger and more resilient.

-

The game has several features that make it fun and challenging, such as:

-
    -
  • Different modes: You can choose from different modes, such as single mode, zombie mode, friend mode, survival mode, and online mode, to enjoy different scenarios and objectives.
  • -
  • Different characters: You can choose from different characters, such as stickman hero, stickman fighter, stickman sniper, stickman gunner, and stickman warrior, to suit your style and preference.
  • -
  • Different enemies: You can face different enemies, such as normal zombies, boss zombies, giant zombies, dog zombies, and robot zombies, each with their own strengths and weaknesses.
  • -
  • Different stages: You can play on different stages, such as city stage, subway stage, desert stage, snow stage, and forest stage, each with their own challenges and surprises.
  • -
-

The benefits of downloading the mod version of Anger of Stick 5: Zombie

-

While the original version of Anger of Stick 5: Zombie is already fun and addictive, you can make it even better by downloading the mod version. The mod version gives you unlimited money and features that will enhance your gaming experience. Some of the benefits of downloading the mod version are:

-
    -
  • Unlimited money: You can get unlimited money in the game, which you can use to buy weapons, items, upgrades, characters, and more. You don't have to worry about running out of money or spending real money on in-app purchases.
  • -
  • Unlocked features: You can get access to all the features in the game without any restrictions or limitations. You can play on any mode, stage, character, or weapon that you want. You don't have to wait for unlocking them or completing certain tasks.
  • -
  • Unlimited health: You can get unlimited health in the game, which means you can survive any attack from the zombies. You don't have to worry about dying or losing your progress.
  • -
  • Unlimited ammo: You can get unlimited ammo in the game, which means you can fire your weapons as much as you want. You don't have to worry about running out of ammo or reloading your weapons.
  • -
-

How to download and install the mod apk of Anger of Stick 5: Zombie

-

If you are interested in downloading the mod version of Anger of Stick 5: Zombie, you need to follow some simple steps. But before that, you need to make sure that you meet some requirements and take some precautions.

-

The requirements and precautions for downloading the mod apk

-

Here are the requirements and precautions that you need to consider before downloading the mod apk:

-
    -
  • You need to have an Android device that runs on Android 4.1 or higher. The mod apk may not work on lower versions of Android.
  • -
  • You need to have enough storage space on your device to download and install the mod apk. The mod apk file size is about 40 MB, and the original game file size is about 30 MB (a quick way to check your free space is sketched after this list).
  • -
  • You need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install the mod apk from a third-party source.
  • -
  • You need to uninstall the original version of Anger of Stick 5: Zombie from your device if you have it installed. The mod apk will not work if you have the original version on your device.
  • -
  • You need to download the mod apk from a trusted and reliable source. There are many websites that offer the mod apk, but some of them may contain viruses or malware that can harm your device. We recommend that you download the mod apk from [this link], which is safe and verified.
  • -
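To illustrate the storage requirement above: the short Python sketch below checks whether a folder has enough free space before you download. It is runnable on a desktop or on-device in a Python app such as Termux; the target path is a placeholder, and the 70 MB figure simply adds the two sizes quoted in the list.

```python
import shutil

TARGET_DIR = "."        # placeholder: where the files will be saved
REQUIRED_MB = 40 + 30   # mod apk (~40 MB) plus the original game (~30 MB)

free_mb = shutil.disk_usage(TARGET_DIR).free / (1024 * 1024)
if free_mb >= REQUIRED_MB:
    print(f"OK: {free_mb:.0f} MB free, {REQUIRED_MB} MB needed.")
else:
    print(f"Not enough space: {free_mb:.0f} MB free, {REQUIRED_MB} MB needed.")
```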
-

The steps to download and install the mod apk

-

Here are the steps that you need to follow to download and install the mod apk:

-
    -
  1. Click on [this link] to go to the download page of the mod apk.
  2. -
  3. Click on the download button and wait for the download to complete.
  4. -
  5. Go to your file manager and locate the downloaded mod apk file.
  6. -
  7. Tap on the file and click on install.
  8. -
  9. Wait for the installation to finish.
  10. -
  11. Launch the game and enjoy!
  12. -
-

How to play and enjoy the mod version of Anger of Stick 5: Zombie

-

Now that you have downloaded and installed the mod version of Anger of Stick 5: Zombie, you are ready to play and enjoy the game with unlimited money and features. Here are some tips and tricks that will help you master the game and have more fun.

-

-

The tips and tricks to master the game

-

Here are some tips and tricks that will help you master the game:

-
    -
  • Use different weapons and items: The game offers a variety of weapons and items that you can use to fight against the zombies. You can switch between them by tapping on their icons on the screen. You can also buy new weapons and items with your unlimited money. Try different combinations of weapons and items to find out which ones suit your style and situation best.
  • -
  • Use different characters and skills: The game offers different characters that have different skills and abilities. You can switch between them by tapping on their icons on the screen. You can also buy new characters with your unlimited money. Try different characters and skills to find out which ones suit your style and situation best.
  • -
  • Use different modes and stages: The game offers different modes and stages that have different scenarios and objectives. You can choose any mode or stage that you want with your unlocked features. Try different modes and stages to find out which ones suit your style and situation best.
  • -
  • Use helicopters and robots: The game offers helicopters and robots that you can use to fight against the zombies. You can summon them by tapping on their icons on the screen. You can also buy new helicopters and robots with your unlimited money. Try using helicopters and robots to find out how they can help you in your battles.
  • -
-

The best features and modes to explore in the mod version

-

Here are some of the best features and modes that you can explore in the mod version:

-
    -
  • Zombie mode: This is the most popular and challenging mode in the game, where you have to survive as long as possible against endless waves of zombies. You can use your unlimited money and features to buy and use the best weapons, items, characters, skills, helicopters, and robots in this mode. You can also compete with other players online and see who can last longer in this mode.
  • -
  • Friend mode: This is a mode where you can play with your friends online or offline. You can team up with up to three friends and fight against the zombies together. You can also chat with your friends and share your strategies and tips. You can use your unlimited money and features to buy and use the best weapons, items, characters, skills, helicopters, and robots in this mode.
  • -
  • Survival mode: This is a mode where you have to survive as long as possible with limited resources. You have to manage your health, ammo, and money carefully in this mode. You can also challenge yourself by increasing the difficulty level and the number of zombies. You can use your unlimited money and features to buy and use the best weapons, items, characters, skills, helicopters, and robots in this mode.
  • -
  • Online mode: This is a mode where you can play with other players online. You can join or create a room and play with up to four players. You can choose from different modes, such as team deathmatch, free for all, capture the flag, and zombie mode. You can also chat with other players and make new friends. You can use your unlimited money and features to buy and use the best weapons, items, characters, skills, helicopters, and robots in this mode.
  • -
-

Conclusion

-

Anger of Stick 5: Zombie is a game that will keep you entertained for hours with its action-packed gameplay and features. You can download the mod version of Anger of Stick 5: Zombie to enjoy the game with unlimited money and features. You can also play the game with your friends online or offline. You can explore different modes, stages, characters, weapons, items, skills, helicopters, and robots in the game. You can also master the game with some tips and tricks that we have shared with you. So what are you waiting for? Download the mod version of Anger of Stick 5: Zombie now and have fun!

-

A call to action for the readers

-

If you liked this article, please share it with your friends and family who are also fans of stickman games and zombies. You can also leave a comment below and tell us what you think about Anger of Stick 5: Zombie and its mod version. We would love to hear from you!

-

FAQs

-

Here are some frequently asked questions about Anger of Stick 5: Zombie and its mod version:

-

Q: Is Anger of Stick 5: Zombie free to play?

-

A: Yes, Anger of Stick 5: Zombie is free to play on both Android and iOS devices. However, it contains ads and in-app purchases that may affect your gaming experience. You can download the mod version of Anger of Stick 5: Zombie to remove the ads and get unlimited money.

-

Q: Is Anger of Stick 5: Zombie safe to download?

-

A: Yes, Anger of Stick 5: Zombie is safe to download from the official sources, such as Google Play Store or App Store. However, if you want to download the mod version of Anger of Stick 5: Zombie, you need to be careful about the source that you choose. Some websites may offer fake or harmful mod apks that can damage your device or steal your data. We recommend that you download the mod version of Anger of Stick 5: Zombie from [this link], which is safe and verified.

-

Q: How do I update Anger of Stick 5: Zombie?

-

A: If you have downloaded Anger of Stick 5: Zombie from the official sources, you can update it automatically or manually through the app store. However, if you have downloaded the mod version of Anger of Stick 5: Zombie, you need to check for updates from the website that you downloaded it from. You may need to uninstall the old mod apk and install the new one to get the latest features.

-

Q: How do I contact the developers of Anger of Stick 5: Zombie?

-

A: If you have any questions or feedback about Anger of Stick 5: Zombie or its mod version , you can contact the developers of Anger of Stick 5: Zombie by sending an email to [this address]. You can also visit their website or follow them on their social media accounts to get the latest news and updates about the game.

-

Q: What are some other stickman games that I can play?

-

A: If you love stickman games, you can try some other games that are similar to Anger of Stick 5: Zombie, such as:

-
    -
  • Stick War: Legacy: This is a game where you have to lead a stickman army and conquer the world. You can use different units, weapons, and strategies to defeat your enemies. You can also play online with other players and join tournaments.
  • -
  • Stickman Legends: Shadow War: This is a game where you have to fight against the dark forces and save the world. You can choose from different stickman heroes, each with their own skills and abilities. You can also upgrade your weapons, armor, and skills to become stronger.
  • -
  • Stick Fight: The Game Mobile: This is a game where you have to fight against other stickmen in various maps and modes. You can use different weapons, items, and physics to knock out your opponents. You can also play online with your friends or random players.
  • -

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Bus Simulator Indonesia XUV 500 mod features and tips.md b/spaces/congsaPfin/Manga-OCR/logs/Bus Simulator Indonesia XUV 500 mod features and tips.md deleted file mode 100644 index e4e4da9739d14bf45859479d8c7c8d0f5f6034c3..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Bus Simulator Indonesia XUV 500 mod features and tips.md +++ /dev/null @@ -1,166 +0,0 @@ -
-

XUV 500 Download Bus Simulator Indonesia: A Guide for Gamers

-

Do you love driving simulation games? Do you want to experience what it's like to be a bus driver in Indonesia? Do you want to drive a stylish and powerful SUV in the busy streets of Jakarta, Bali, or Surabaya? If you answered yes to any of these questions, then you should try Bus Simulator Indonesia with XUV 500 mod. In this article, we will show you how to download and install Bus Simulator Indonesia and XUV 500 mod, how to play the game with the mod, and some tips and tricks to make your gaming experience more enjoyable.

-

What is Bus Simulator Indonesia?

-

Bus Simulator Indonesia (also known as BUSSID) is a fun and authentic game that lets you experience what it's like to be a bus driver in Indonesia. It has many features that make it stand out from other bus simulator games, including the ability to design your own livery, authentic Indonesian cities and places, Indonesian buses, cool and fun honks, high-quality and detailed 3D graphics, online multiplayer convoy, and more. The game is developed by Maleo and is available for free on Google Play Store. You can also use your own 3D model using the vehicle mod system.

-

xuv 500 download bus simulator indonesia


Download Zip ✶✶✶ https://urlca.com/2uO60d



-

Features of Bus Simulator Indonesia

-

Some of the top features of Bus Simulator Indonesia are:

-
    -
  • Design your own livery: You can create your own unique design for your bus using the livery editor. You can choose from different colors, stickers, logos, patterns, and more. You can also share your livery with other players or download their liveries.
  • -
  • Very easy and intuitive control: You can control your bus using various options, such as tilt, steering wheel, buttons, or slider. You can also adjust the camera angle, turn on/off the headlights, honk the horn, open/close the door, and more.
  • -
  • Authentic Indonesian cities and places: You can drive your bus in various locations in Indonesia, such as Jakarta, Bali, Surabaya, Palembang, Medan, Bandung, etc. You can also see famous landmarks, such as Monas, Tanah Lot, Borobudur, etc.
  • -
  • Indonesian buses: You can choose from different types of buses that are popular in Indonesia, such as double-decker buses, mini buses, city buses, intercity buses, etc. Each bus has its own characteristics and features.
  • -
  • Cool and fun honks: You can honk your horn in different ways to communicate with other drivers or pedestrians. You can also use the iconic “Om Telolet Om!” honk that became a viral phenomenon in Indonesia.
  • -
  • High quality and detailed 3D graphics: You can enjoy the realistic and immersive graphics of the game that show the details of the buses, the environments, the weather effects, the traffic situations, etc.
  • -
  • No obstructive ads while driving: You can play the game without being disturbed by annoying ads while driving. The ads are only shown on billboards or banners along the road.
  • -
  • Leaderboard: You can compete with other players on the leaderboard based on your score, distance traveled, passengers transported, etc.
  • -
  • Data saved online: You can save your progress online using your Google account. You can also sync your data across different devices.
  • -
  • Use your own 3D model with the vehicle mod system: You can add your own custom vehicle to the game using the vehicle mod system. You can create your own 3D model in any software, such as Blender or SketchUp, and import it into the game. You can also adjust parameters such as the speed, the weight, and the suspension of your vehicle (a minimal Blender export sketch follows after this list).
  • -
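Since the last feature above mentions building a model in Blender, here is a minimal sketch of the Blender half of that workflow: exporting whatever objects you currently have selected, from Blender's scripting console. The output path and the FBX format are assumptions for illustration; packaging the exported model into a .bussidvehicle file is a separate BUSSID-specific step that this sketch does not cover.

```python
# Run inside Blender's Python console (the bpy module exists only there).
import bpy

# Export only the selected objects, e.g. your vehicle mesh, as FBX.
bpy.ops.export_scene.fbx(
    filepath="/tmp/my_vehicle.fbx",  # placeholder output path
    use_selection=True,
)
```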
-

How to download and install Bus Simulator Indonesia

-

To download and install Bus Simulator Indonesia on your Android device, you can follow these simple steps:

-

-
    -
  1. Go to Google Play Store and search for Bus Simulator Indonesia or click [here].
  2. -
  3. Tap on the Install button and wait for the download to finish.
  4. -
  5. Once the download is complete, tap on the Open button to launch the game.
  6. -
  7. Grant the necessary permissions to the game, such as storage, location, etc.
  8. -
  9. Choose your preferred language and sign in with your Google account.
  10. -
  11. Enjoy playing Bus Simulator Indonesia!
  12. -
-

What is XUV 500 mod?

-

XUV 500 mod is a vehicle mod for Bus Simulator Indonesia that adds a new SUV to the game. XUV 500 is a popular and luxurious SUV model from Mahindra, an Indian automobile company. It has a sleek design, a spacious interior, a powerful engine, and many advanced features. XUV 500 mod allows you to drive this SUV in Bus Simulator Indonesia and enjoy its performance and style.

-

Features of XUV 500 mod

-

Some of the features of XUV 500 mod are:

-
    -
  • Realistic and detailed 3D model: The XUV 500 mod has a high-quality and accurate 3D model that resembles the real XUV 500. It has realistic textures, colors, lights, shadows, reflections, etc.
  • -
  • Smooth and dynamic driving: The XUV 500 mod has a smooth and responsive driving mechanism that lets you control the SUV with ease. It has realistic physics, sounds, animations, etc.
  • -
  • Customizable livery: The XUV 500 mod has a customizable livery that lets you change the color and design of your SUV. You can choose from different presets or create your own livery using the livery editor.
  • -
  • Compatible with other mods: The XUV 500 mod is compatible with other vehicle mods for Bus Simulator Indonesia. You can use it along with other mods without any issues.
  • -
-

How to download and install XUV 500 mod

-

To download and install XUV 500 mod on your Android device, you can follow these simple steps:

-
    -
  1. Go to [this link] and download the XUV 500 mod file (XUV_500_Bus_Simulator_Indonesia.zip).
  2. -
  3. Extract the zip file using any file manager app, such as ZArchiver. You will get two files: XUV_500_Bus_Simulator_Indonesia.bussidvehicle and Livery_XUV_500_Bus_Simulator_Indonesia.png. (If you prefer to do steps 3-7 from a computer, see the sketch after this list.)
  4. -
  5. Copy the XUV_500_Bus_Simulator_Indonesia.bussidvehicle file to the BUSSID folder in your internal storage. If you don't have this folder, create one.
  6. -
  7. Copy the Livery_XUV_500_Bus_Simulator_Indonesia.png file to the Livery folder in your internal storage. If you don't have this folder, create one.
  8. -
  9. Launch Bus Simulator Indonesia and go to Garage.
  10. -
  11. Tap on the Vehicle icon and select Import.
  12. -
  13. Select the XUV_500_Bus_Simulator_Indonesia.bussidvehicle file from the BUSSID folder and tap on Import.
  14. -
  15. You will see a message saying "Vehicle imported successfully". Tap on OK.
  16. -
  17. You will see the XUV 500 mod in your garage. Tap on it to select it.
  18. -
  19. If you want to change the livery of your XUV 500, tap on Livery icon and select Custom Livery.
  20. -
  21. Select the Livery_XUV_500_Bus_Simulator_Indonesia.png file from the Livery folder and tap on Apply.
  22. -
  23. You have successfully installed the XUV 500 mod. Enjoy driving it!
  24. -
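If you prefer to run steps 3-7 from a computer with the phone's storage mounted (or on-device in a Python app), the sketch below automates them: it unpacks the archive and copies the two files into the folders the steps name. The mount path is a placeholder; only the BUSSID and Livery folder names come from the steps above.

```python
import pathlib
import shutil
import zipfile

STORAGE = pathlib.Path("/path/to/phone/storage")  # placeholder mount point
ARCHIVE = "XUV_500_Bus_Simulator_Indonesia.zip"
WORK_DIR = pathlib.Path("xuv500_mod")

# Destination folder per file type, as named in the installation steps.
dest_for_suffix = {
    ".bussidvehicle": STORAGE / "BUSSID",
    ".png": STORAGE / "Livery",
}

with zipfile.ZipFile(ARCHIVE) as zf:   # step 3: unpack the archive
    zf.extractall(WORK_DIR)

for f in WORK_DIR.iterdir():           # steps 5 and 7: copy each file
    dest = dest_for_suffix.get(f.suffix)
    if dest is not None:
        dest.mkdir(parents=True, exist_ok=True)  # create the folder if missing
        shutil.copy2(f, dest / f.name)
        print(f"Copied {f.name} -> {dest}")
```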
-

How to play Bus Simulator Indonesia with XUV 500 mod

-

Now that you have installed Bus Simulator Indonesia and XUV 500 mod, you are ready to play the game with the mod. Here are some tips and tricks to help you get started:

-

Tips and tricks for driving XUV 500 in Bus Simulator Indonesia

-

The XUV 500 is a fast and agile SUV that can handle different terrains and situations. However, it also requires some skills and caution to drive it safely and efficiently. Here are some tips and tricks for driving XUV 500 in Bus Simulator Indonesia:

-
    -
  • Adjust the camera angle: You can change the camera angle by swiping the screen or tapping the camera icon. You can choose from different views, such as first-person, third-person, top-down, etc. Choose the one that suits your preference and visibility.
  • -
  • Use the indicators: You can use the indicators to signal your intention to turn left or right. This will help you avoid accidents and traffic violations. You can also use the hazard lights to warn other drivers of any danger or emergency.
  • -
  • Follow the traffic rules: You should follow the traffic rules and regulations in Bus Simulator Indonesia, such as speed limits, traffic lights, road signs, etc. This will help you avoid fines and penalties, as well as improve your score and reputation.
  • -
  • Be careful of the traffic: You should be aware of the traffic situation and the behavior of other drivers and pedestrians. You should avoid collisions, overtaking, cutting lanes, honking unnecessarily, etc. You should also respect the right of way and yield to other vehicles when necessary.
  • -
  • Use the brakes wisely: You should use the brakes to slow down or stop your vehicle when needed. However, you should also avoid braking too hard or too often, as this will damage your vehicle and reduce your fuel efficiency. You should also use the handbrake to park your vehicle or to perform drifts.
  • -
  • Refuel your vehicle: You should refuel your vehicle when the fuel gauge is low. You can find gas stations along the road or on the map. You should also check your oil level and change it when needed.
  • -
  • Repair your vehicle: You should repair your vehicle when it is damaged or broken. You can find repair shops along the road or on the map. You should also check your tire pressure and change it when needed.
  • -
-

How to customize your XUV 500 livery

-

You can customize your XUV 500 livery by using the livery editor or by downloading other players' liveries. Here are the steps to customize your XUV 500 livery:

-
    -
  1. Go to Garage and tap on Livery icon.
  2. -
  3. Select Custom Livery.
  4. -
  5. Select Edit Livery.
  6. -
  7. You will see a blank template of your XUV 500. You can use different tools to create your own design, such as paint bucket, brush, eraser, sticker, text, etc.
  8. -
  9. You can also use different layers to organize your design elements.
  10. -
  11. You can also import images from your gallery or camera to use as stickers or backgrounds (a tiny sketch for generating such an image follows after this list).
  12. -
  13. When you are done with your design, tap on Save Livery.
  14. -
  15. You can also share your livery with other players by tapping on Share Livery.
  16. -
  17. You can also download other players' liveries by tapping on Download Livery.
  18. -
  19. You can apply your livery to your XUV 500 by tapping on Apply Livery.
  20. -
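For step 11 above: if you want a ready-made image to import as a background, the tiny Python sketch below generates one with the Pillow library. The 1024x1024 size and the colors are placeholders; the game's real livery template dimensions are not given in this article.

```python
from PIL import Image, ImageDraw  # pip install Pillow

# Placeholder canvas: a solid base color with a text label.
img = Image.new("RGB", (1024, 1024), "#1e90ff")
draw = ImageDraw.Draw(img)
draw.text((40, 40), "MY XUV 500", fill="white")  # uses Pillow's default font
img.save("Livery_custom_background.png")
print("Wrote Livery_custom_background.png")
```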
-

How to join online multiplayer convoy with XUV 500

-

You can join online multiplayer convoy with XUV 500 by following these steps:

-
    -
  1. Go to Menu and tap on Multiplayer icon.
  2. -
  3. Select Online Convoy.
  4. -
  5. Select a server that has available slots and tap on Join Server.
  6. -
  7. You will see a list of rooms that are created by other players. You can join an existing room or create your own room.
  8. -
  9. If you join an existing room, you will see the details of the room, such as the name, the password (if any), the number of players, the destination, etc. Tap on Join Room to enter the room.
  10. -
  11. If you create your own room, you will see a screen where you can set up your room settings, such as the name, the password (if any), the destination, etc. Tap on Create Room to create the room.
  12. -
  13. Once you are in a room, you will see a chat box where you can communicate with other players in the room. You can also see their names, their vehicles, their locations, etc.
  14. -
  15. You can also invite your friends to join your room by tapping on Invite Friends icon.
  16. -
  17. When everyone is ready, tap on Start Convoy to start driving together with other players in the room.
  18. -
  19. You can also leave the room or end the convoy by tapping on Leave Room or End Convoy icon.
  20. -
-

Conclusion

-

Bus Simulator Indonesia is a fun and realistic game that lets you experience what it's like to be a bus driver in Indonesia. You can also use the XUV 500 mod to drive a stylish and powerful SUV in the game. You can download and install Bus Simulator Indonesia and XUV 500 mod by following the steps in this article. You can also customize your XUV 500 livery, join online multiplayer convoy, and enjoy driving with other players. We hope this article has helped you learn more about Bus Simulator Indonesia and XUV 500 mod. Happy gaming!

-

Summary of the main points

-

Here are the main points of this article:

-
    -
  • Bus Simulator Indonesia is a game that simulates bus driving in Indonesia.
  • -
  • XUV 500 mod is a vehicle mod that adds a new SUV to Bus Simulator Indonesia.
  • -
  • You can download and install Bus Simulator Indonesia and XUV 500 mod by following the steps in this article.
  • -
  • You can customize your XUV 500 livery, join online multiplayer convoy, and enjoy driving with other players.
  • -
-

Recommendations for further reading

If you want to learn more about Bus Simulator Indonesia and the XUV 500 mod, you can check out these resources:

  • [Bus Simulator Indonesia official website]: More information about the game, such as the latest updates, news and events.
  • [Bus Simulator Indonesia official Facebook page]: Follow the official page for the latest news, updates, tips and tricks.
  • [XUV 500 mod YouTube video]: A video that shows how to download and install the XUV 500 mod for Bus Simulator Indonesia.
  • [XUV 500 mod Facebook group]: A group dedicated to the XUV 500 mod, where you can share your liveries, screenshots and videos with other players.

FAQs

Here are some frequently asked questions about Bus Simulator Indonesia and the XUV 500 mod:

  1. Q: Is Bus Simulator Indonesia free to play?
     A: Yes. However, it contains some in-app purchases that can enhance your gaming experience.
  2. Q: Is the XUV 500 mod free to download?
     A: Yes. However, you need to have Bus Simulator Indonesia installed on your device first.
  3. Q: Can I use other vehicle mods together with the XUV 500 mod?
     A: Yes, as long as they are compatible with each other and with your game version.
  4. Q: Can I play Bus Simulator Indonesia offline?
     A: Yes. However, you need an internet connection to save your progress online or to join an online multiplayer convoy.
  5. Q: Can I play Bus Simulator Indonesia on PC?
     A: No, Bus Simulator Indonesia is only available for Android devices. However, you can use an Android emulator on your PC to play the game.

\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/City Block APK A Free Offline and Ad-Free Town Simulation Game for Android.md b/spaces/congsaPfin/Manga-OCR/logs/City Block APK A Free Offline and Ad-Free Town Simulation Game for Android.md deleted file mode 100644 index 9ac6c20fcb7eeeb493fc155f59be84413ab73980..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/City Block APK A Free Offline and Ad-Free Town Simulation Game for Android.md +++ /dev/null @@ -1,145 +0,0 @@ - -

City Block APK: A Fun and Free Town Simulation Game for Android

Do you love driving a big pixel car around a playmat-style town and trying out different professions? If so, you should try City Block APK, a town simulation game for Android devices. You can play as a policeman, firefighter, ambulance driver, farmer, or taxi driver and complete various missions in an open-world 3D game. You can also take a break and race through the roads and streets of the town on a motorcycle, kickbike or skateboard, play some football, or ride a sheep! City Block APK will keep you entertained for hours with its fun and engaging gameplay.

What is City Block APK?

City Block APK is a game developed by Mkay Games, a small indie studio based in Finland. It was released in 2020 and has since gained over 5 million downloads on the Google Play Store. The game is inspired by early auto-theft games, where you drive around in different vehicles and cause chaos in the city, but it is more family-friendly and suitable for kids, as it does not involve violence or crime. Instead, it focuses on helping the town and its people with various tasks and challenges.


Features of City Block APK

City Block APK has many features that make it an enjoyable and addictive game. Here are some of them:

Police car

As a policeman, you can protect and serve the town by arresting thieves and robbers in high-speed car chases and by finding missing children. You can also use your siren and lights to clear the traffic and reach your destination faster.

Fire truck

As a firefighter, you can extinguish fires and save people's homes. You can also use your water cannon to spray water on anything you want, such as cars, buildings, or even sheep!

Ambulance

As an ambulance driver, you can take injured people to the hospital as fast as possible. You can also use your medical kit to heal yourself or others.

Garbage truck

As a garbage truck driver, you can keep the town clean by collecting trash from different locations. You can also dump the trash anywhere you want, such as on the road, on the grass, or even on other vehicles!

Tractor

As a farmer, you can plough, sow and harvest the wheat on the farm while keeping the sheep out of the carrot field. You can also use your trailer to transport goods or animals.

Taxi

As a taxi driver, you can take customers to their destinations, and they will reward you for speed. You can also use your horn to honk at other drivers or pedestrians.

How to download and install City Block APK?

To download and install City Block APK on your Android device, follow these steps:

  1. Go to [APKCombo], a website that provides free APK downloads for Android games and apps.
  2. Type "city block apk" in the search box and click on the first result.
  3. Click on the "Download APK" button and choose a version that is compatible with your device.
  4. Wait for the download to finish, optionally verify the file (see the checksum sketch below), and then open it.
  5. Allow installation from unknown sources if prompted by your device settings.
  6. Follow the instructions on the screen to install the game.
  7. Enjoy playing City Block APK!
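Whenever you sideload an APK from a third-party site, it is worth verifying the file before installing it. Below is a minimal Python sketch that computes the SHA-256 checksum of a downloaded file so you can compare it against a checksum published by a source you trust. The file name is a placeholder, and this is a general precaution rather than a procedure documented by APKCombo or the developer.

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in 1 MiB chunks."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    apk = Path("city-block.apk")  # placeholder: wherever you saved the download
    print(f"{apk.name}: {sha256_of(apk)}")
```

If the digest does not match the one published by your trusted source, delete the file instead of installing it.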

Pros and cons of City Block APK

City Block APK has its advantages and disadvantages. Here are some of them:

| Pros | Cons |
| --- | --- |
| It is free to download and play. | It contains ads that may be annoying or intrusive. |
| It has colorful, pixelated graphics that create a retro, nostalgic vibe. | It has low-quality sound effects and music that may get repetitive. |
| It has a variety of vehicles and professions to choose from, each with its own gameplay and missions. | It has a limited map size and amount of content, which may make the game feel repetitive after a while. |
| It has a simple and intuitive control system that is easy to use. | It has some bugs and glitches that may affect performance or the play experience. |
| It is suitable for kids and adults, as it does not involve violence or crime. | It has no multiplayer mode or online features, which reduces its replay value and social appeal. |
Why play City Block APK?

City Block APK can provide you with hours of fun and entertainment. Here are some reasons why you should play it:

Benefits of playing town simulation games

Town simulation games are games that let you create, manage, or explore a virtual town or city. They can have many benefits for your mental and physical health, such as:

  • They can improve your creativity and imagination, as you design your own town or city according to your preferences and style.
  • They can enhance your problem-solving and decision-making skills, as you face challenges and situations that require you to think critically and strategically.
  • They can boost your memory and concentration, as you keep track of the details of your town or city and focus on your tasks and goals.
  • They can reduce your stress and anxiety, as you escape from reality and immerse yourself in a relaxing and enjoyable environment.
  • They can strengthen your social skills and communication, as you interact with other players or characters and share your experiences and opinions with them.

Tips and tricks for playing City Block APK

If you want a better gaming experience with City Block APK, here are some tips and tricks:

  • Explore the town and find hidden items, such as coins, stars, keys, or chests. You can use them to unlock new vehicles, skins, or features.
  • Complete the missions and challenges given to you by the people in the town to earn rewards such as money, points, or trophies.
  • Customize your vehicles and characters to your liking. You can change their color, shape, size, or accessories in the garage or the shop.
  • Use the map to navigate the town and find your destination. The mini-map in the top right corner of the screen shows your location and direction.
  • Be careful when driving on the road or crossing the street: careless driving can cause accidents or damage, and the police can fine you for breaking the traffic rules.

Conclusion

City Block APK is a fun and free town simulation game for Android devices. You can play different professions in a town, complete various missions in an open-world 3D game, and drive a range of vehicles while exploring at your own pace. Playing it can exercise your creativity, problem-solving, memory, concentration, stress relief, social skills, and communication, and it is suitable for kids and adults alike. If you are looking for a game that will keep you entertained for hours, download City Block APK today!

Frequently Asked Questions (FAQs)

Here are some of the most common questions that people ask about City Block APK:

  1. Q: Is City Block APK safe to download?
     A: Yes, City Block APK is safe to download from [APKCombo], as it does not contain any viruses or malware. However, always be careful when downloading APK files from unknown sources, as they may harm your device or data.
  2. Q: What are the requirements for playing City Block APK?
     A: City Block APK requires Android 4.4 or higher to run smoothly, and about 100 MB of free storage space on your device. Check your device's compatibility and specifications before downloading the game.
  3. Q: How can I remove the ads from City Block APK?
     A: City Block APK is a free game supported by ads. If you find the ads annoying or intrusive, you can remove them by purchasing the ad-free version of the game for $1.99, or block them with an ad-blocker or VPN app.
  4. Q: Can I play City Block APK offline?
     A: Yes, you can play City Block APK offline without an internet connection. However, you may not be able to access features or updates that require online connectivity, such as leaderboards, achievements, or new content.
  5. Q: Can I play City Block APK with my friends?
     A: No, City Block APK has no multiplayer mode or online features that let you play with your friends. You can still share screenshots or videos of your gameplay with them on social media platforms such as Facebook, Instagram, or YouTube.
  6. Q: Where can I get more information or support for City Block APK?
     A: If you have questions, feedback, or issues, you can contact the developer at mkaygames@gmail.com, or visit their website [Mkay Games] or their Facebook page [Mkay Games].

\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Deadpool 2 Hindi Dubbed Download - Dont Miss the Funniest Superhero Movie Ever.md b/spaces/congsaPfin/Manga-OCR/logs/Deadpool 2 Hindi Dubbed Download - Dont Miss the Funniest Superhero Movie Ever.md deleted file mode 100644 index d7851d9883140522ac98a248a0a1f44c66156d2f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Deadpool 2 Hindi Dubbed Download - Dont Miss the Funniest Superhero Movie Ever.md +++ /dev/null @@ -1,142 +0,0 @@ -
Download Deadpool 2 Full Movie in Hindi Filmyzilla: A Risky and Illegal Way to Watch the Superhero Sequel

If you are a fan of superhero movies, you have probably heard of Deadpool 2, the sequel to the hit 2016 film Deadpool. The movie features Ryan Reynolds as the witty and irreverent mercenary Wade Wilson, who teams up with other mutants to protect a young boy from a time-traveling cyborg named Cable. Full of action, comedy, and pop-culture references, it was one of the most entertaining and successful films of 2018. But how can you watch Deadpool 2 online, especially if you want to enjoy it in Hindi? You might have come across a website called Filmyzilla, which claims to offer Deadpool 2 full movie in Hindi for free download. But is this website safe and legal? In this article, we will tell you everything you need to know about Filmyzilla, its risks and drawbacks, and the best alternatives for watching Deadpool 2 legally and safely.

What is Deadpool 2 and why is it popular?

A brief summary of the movie plot and characters

Deadpool 2 is a superhero film based on the Marvel Comics character Deadpool, also known as the Merc with a Mouth. The film follows Wade Wilson, a former special forces operative who becomes Deadpool after a rogue experiment gives him superhuman healing abilities. After losing his girlfriend Vanessa in a tragic incident, Wade joins forces with Colossus, Negasonic Teenage Warhead, Domino, and other mutants to form X-Force, a team of unconventional heroes. Their mission is to stop Cable, a futuristic soldier who travels back in time to kill Russell, a young mutant with destructive powers. Along the way, Wade learns to cope with his loss, find his purpose, and embrace his family.


The critical and commercial success of the movie

Deadpool 2 was released on May 18, 2018, in the United States. It received positive reviews from critics and audiences alike, who praised its humor, cast performances, story, and action sequences. It also broke several box office records, becoming the ninth-highest-grossing film of 2018, the highest-grossing film in the X-Men series, and the highest-grossing R-rated film at the time. A PG-13-rated version called Once Upon a Deadpool followed in December 2018, and a sequel, Deadpool 3, which will integrate the character into the Marvel Cinematic Universe (MCU), is set for release on May 3, 2024.

What is Filmyzilla and how does it work?

A public torrent website that leaks movies illegally

Filmyzilla is a public torrent website that leaks movies illegally on its platform. The site offers a wide range of movies across genres, languages, and regions, such as Hollywood, Bollywood, Tollywood, and Kollywood, and it provides dubbed versions in languages such as Hindi, Tamil, Telugu, Malayalam, and Kannada. Users can download movies for free without any registration or subscription, and the site updates its content regularly with the latest releases and leaks.

The categories and formats of movies available on Filmyzilla

The website has various categories of movies to choose from, such as action, adventure, comedy, drama, horror, thriller, sci-fi, fantasy, and animation, and users can also search for movies by year, quality, or language. Movies are offered in different formats and resolutions, such as 360p, 480p, 720p, 1080p, HD, Full HD, and BluRay, and each movie's page lists its file size and duration.

What are the risks and drawbacks of downloading movies from Filmyzilla?

The legal consequences of piracy and copyright infringement

Downloading movies from Filmyzilla is not only risky but also illegal. Piracy and copyright infringement are serious offenses that can lead to legal action and penalties. The website violates the Indian Copyright Act of 1957 and the Information Technology Act of 2000 by distributing pirated content without the permission of the original owners, and it infringes on the intellectual property rights of the filmmakers and producers who invest considerable money and effort in making the movies. The government of India and the Department of Telecommunications (DoT) have taken several measures to block and ban Filmyzilla and similar websites, and the site faces legal action from film industry bodies such as the Motion Picture Association of America (MPAA), the Indian Film and Television Producers Council (IFTPC), and the Federation of Indian Chambers of Commerce and Industry (FICCI). Users who download or share movies from Filmyzilla can also face legal trouble, including fines, imprisonment, or both.

The potential malware and viruses that can harm your device

Another risk of downloading movies from Filmyzilla is infecting your device with malware and viruses. The website is not secure and offers no encryption or protection, and it hosts pop-up ads, banners, and redirects that can expose your device to malicious software and hackers. Such malware can damage your device, steal your personal information, access your files, or monitor your online activities, and it can compromise your network security and affect other devices connected to it.

The poor quality and accuracy of the dubbed versions

One reason some people might want to download Deadpool 2 full movie in Hindi from Filmyzilla is to enjoy the movie in their native language. However, the dubbed versions on Filmyzilla are often of poor quality and accuracy. The site has no professional or authorized dubbing artists or translators; it relies on automated software or amateur volunteers, which leads to errors, mispronunciations, and loss of meaning in the dialogue and subtitles. The dubbed versions can also ruin the original tone, humor, and emotion of the movie.

What are the best alternatives to Filmyzilla for watching Deadpool 2 legally and safely?

The official streaming platforms that offer Deadpool 2 online

The best way to watch Deadpool 2 online is through the official streaming platforms that have the rights to stream the movie legally and safely. Some of these platforms are:

| Platform | Price | Availability |
| --- | --- | --- |
| Disney+ Hotstar | Rs. 299 per month or Rs. 1499 per year (Premium subscription) | India |
| Hulu | $5.99/month or $59.99/year (Basic, with ads); $11.99/month or $119.99/year (No Ads); $64.99/month or $719.88/year (Live TV) | United States |
| Amazon Prime Video | $12.99/month or $119/year; Rs. 129/month or Rs. 999/year; £7.99/month or £79/year; €7.99/month or €69/year (Prime membership) | United States, India, United Kingdom, Germany, France, and other countries |
| YouTube Movies | $3.99 to rent; $14.99 to buy | United States, Canada, United Kingdom, Australia, and other countries |
| Google Play Movies & TV | $3.99 to rent; $14.99 to buy | United States, Canada, United Kingdom, Australia, and other countries |
| iTunes | $3.99 to rent; $14.99 to buy | United States, Canada, United Kingdom, Australia, and other countries |
| Vudu | $3.99 to rent; $14.99 to buy | United States and Canada |
| FandangoNOW | $3.99 to rent; $14.99 to buy | United States |

These platforms offer Deadpool 2 in high-quality video and audio, with options to choose the language, subtitles, and playback speed. They also provide features such as parental controls, offline downloads, recommendations, and reviews, and they respect the rights and interests of the movie's creators and actors while supporting the film industry.
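If you are weighing a monthly plan against a yearly one, the break-even point is simple arithmetic: divide the yearly price by the monthly price to see after how many months the yearly plan becomes cheaper. A small Python sketch using the Disney+ Hotstar Premium prices from the table above (purely illustrative; prices change over time):

```python
def break_even_months(yearly_price: float, monthly_price: float) -> float:
    """Months of continuous use after which the yearly plan is cheaper."""
    return yearly_price / monthly_price

# Disney+ Hotstar Premium prices from the table above, in INR.
monthly, yearly = 299, 1499
print(f"Yearly plan pays off after {break_even_months(yearly, monthly):.1f} months.")
# -> about 5.0 months: if you subscribe longer than that, Rs. 1499/year wins.
```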

The benefits of subscribing to these platforms

By subscribing to these platforms, you can enjoy many benefits, such as:

  • Access to a vast library of movies, shows, documentaries, and originals across various genres and languages.
  • The ability to watch the content on multiple devices, such as smartphones, tablets, laptops, smart TVs, and gaming consoles.
  • The flexibility to cancel or change your subscription plan at any time.
  • Value for money and convenience, as you can watch unlimited content for a fixed monthly or yearly fee.
  • Security and privacy, as these platforms use encryption and authentication to protect your data and identity.

Conclusion

Deadpool 2 is a fun and exciting movie that you can watch online legally and safely. Downloading Deadpool 2 full movie in Hindi from Filmyzilla is a bad idea: the site is a public torrent website that leaks movies without the owners' permission, and using it can expose you to legal trouble, malware attacks, and poor-quality dubbed versions. The best alternatives are the official streaming platforms that offer Deadpool 2 online for a reasonable price, with high-quality video and audio, language and subtitle options, and features such as parental controls and offline downloads. They also respect the rights of the movie's creators and actors and support the film industry, while giving you access to a vast content library, flexible plans, value for money, and security and privacy. So, what are you waiting for? Grab your popcorn and enjoy Deadpool 2 online legally and safely!

FAQs

Is Deadpool 2 available on Netflix or Amazon Prime Video?

No, Deadpool 2 is not available on Netflix or Amazon Prime Video in most countries. However, you can watch it on Disney+ Hotstar in India, Hulu in the United States, or Amazon Prime Video in some other countries.

Is Deadpool 2 suitable for children or family viewing?

No. The movie is rated R in the United States, A in India, 15 in the United Kingdom, and 16 in Germany, and it contains strong violence, gore, profanity, sexual references, drug use, and adult humor.

How can I download Deadpool 2 legally and offline?

You can download Deadpool 2 legally and offline through the official streaming platforms that offer this feature, such as Disney+ Hotstar in India or Hulu in the United States. You need an active subscription plan and enough storage space on your device, and you must follow the platform's terms regarding download limits, expiry dates, and offline viewing.

How can I avoid phishing or scam websites that claim to offer Deadpool 2 for free?

You can avoid phishing or scam websites that claim to offer Deadpool 2 for free by following these tips (see the certificate-check sketch after this list):

  • Do not click on suspicious links or pop-ups that appear in your browser or email.
  • Do not enter personal or financial information on any website that does not have a secure connection (https) or a verified domain name.
  • Do not download software or apps from websites that are not authorized or trusted.
  • Do not trust any website that promises Deadpool 2 for free without any registration or subscription.
  • Use reliable antivirus software and a firewall to protect your device from malware and viruses.
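On the "secure connection" point above: a browser performs this check for you, but if you want to inspect a site's TLS certificate yourself, the Python standard library can do it. A minimal sketch, using a placeholder hostname; it raises ssl.SSLCertVerificationError if the certificate does not validate:

```python
import socket
import ssl

def inspect_certificate(host: str, port: int = 443) -> None:
    """Connect over TLS and report who the certificate was issued to and when it expires."""
    context = ssl.create_default_context()  # verifies the chain and the hostname
    with socket.create_connection((host, port), timeout=10) as sock:
        with context.wrap_socket(sock, server_hostname=host) as tls:
            cert = tls.getpeercert()
    subject = dict(item for rdn in cert["subject"] for item in rdn)
    print(f"{host}: issued to {subject.get('commonName', '?')}, expires {cert['notAfter']}")

inspect_certificate("www.python.org")  # placeholder hostname; try any site you want to check
```

A valid certificate is necessary but not sufficient: scam sites can obtain certificates too, so treat this as one signal among the tips above, not a guarantee.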

How can I support the creators and actors of Deadpool 2?

You can support them by watching the movie legally and safely on the official streaming platforms, or by buying or renting it from authorized sources. You can also follow the creators and actors on social media to share your feedback and appreciation, and buy Deadpool 2 merchandise from licensed vendors.

\ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/modules/squeeze_excitation.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/modules/squeeze_excitation.py deleted file mode 100644 index d1d902bb30c071acbc0fa919a134c80fed86bd6c..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/modules/squeeze_excitation.py +++ /dev/null @@ -1,20 +0,0 @@ -import torch.nn as nn - - -class SELayer(nn.Module): - def __init__(self, channel, reduction=16): - super(SELayer, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.fc = nn.Sequential( - nn.Linear(channel, channel // reduction, bias=False), - nn.ReLU(inplace=True), - nn.Linear(channel // reduction, channel, bias=False), - nn.Sigmoid() - ) - - def forward(self, x): - b, c, _, _ = x.size() - y = self.avg_pool(x).view(b, c) - y = self.fc(y).view(b, c, 1, 1) - res = x * y.expand_as(x) - return res diff --git a/spaces/cozyanduofen/bingo/src/lib/bots/bing/utils.ts b/spaces/cozyanduofen/bingo/src/lib/bots/bing/utils.ts deleted file mode 100644 index 64b4b96452d125346b0fc4436b5f7c18c962df0b..0000000000000000000000000000000000000000 --- a/spaces/cozyanduofen/bingo/src/lib/bots/bing/utils.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { ChatResponseMessage, BingChatResponse } from './types' - -export function convertMessageToMarkdown(message: ChatResponseMessage): string { - if (message.messageType === 'InternalSearchQuery') { - return message.text - } - for (const card of message.adaptiveCards??[]) { - for (const block of card.body) { - if (block.type === 'TextBlock') { - return block.text - } - } - } - return '' -} - -const RecordSeparator = String.fromCharCode(30) - -export const websocketUtils = { - packMessage(data: any) { - return `${JSON.stringify(data)}${RecordSeparator}` - }, - unpackMessage(data: string | ArrayBuffer | Blob) { - if (!data) return {} - return data - .toString() - .split(RecordSeparator) - .filter(Boolean) - .map((s) => { - try { - return JSON.parse(s) - } catch (e) { - return {} - } - }) - }, -} - -export async function createImage(prompt: string, id: string, headers: HeadersInit): Promise { - const { headers: responseHeaders } = await fetch(`https://www.bing.com/images/create?partner=sydney&re=1&showselective=1&sude=1&kseed=7000&SFX=&q=${encodeURIComponent(prompt)}&iframeid=${id}`, - { - method: 'HEAD', - headers, - redirect: 'manual' - }, - ); - - if (!/&id=([^&]+)$/.test(responseHeaders.get('location') || '')) { - throw new Error('请求异常,请检查 cookie 是否有效') - } - - const resultId = RegExp.$1; - let count = 0 - const imageThumbUrl = `https://www.bing.com/images/create/async/results/${resultId}?q=${encodeURIComponent(prompt)}&partner=sydney&showselective=1&IID=images.as`; - - do { - await sleep(3000); - const content = await fetch(imageThumbUrl, { headers, method: 'GET' }) - - // @ts-ignore - if (content.headers.get('content-length') > 1) { - const text = await content.text() - return (text?.match(/ target?.split('src="').pop()?.replace(/&/g, '&')) - .map(img => `![${prompt}](${img})`).join(' ') - } - } while(count ++ < 10); -} - - -export async function* streamAsyncIterable(stream: ReadableStream) { - const reader = stream.getReader() - try { - while (true) { - const { done, value } = await reader.read() - if (done) { - return - } - yield value - } - } finally { - reader.releaseLock() - } -} - -export const 
sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms)) - diff --git a/spaces/crylake/img2poem/query2labels/lib/dataset/cocodataset.py b/spaces/crylake/img2poem/query2labels/lib/dataset/cocodataset.py deleted file mode 100644 index f4b9a9de30252d2146dee43f990aaa9fd820ff1a..0000000000000000000000000000000000000000 --- a/spaces/crylake/img2poem/query2labels/lib/dataset/cocodataset.py +++ /dev/null @@ -1,78 +0,0 @@ -import torch -import sys, os - -import torchvision.datasets as dset -import torchvision.transforms as transforms -import torch.utils.data as data -from PIL import Image -import numpy as np -import json -import random -from tqdm import tqdm - -category_map = {"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "10": 10, "11": 11, "12": 12, "13": 13, "14": 14, "15": 15, "16": 16, "17": 17, "18": 18, "19": 19, "20": 20, "21": 21, "22": 22, "23": 23, "24": 24, "25": 25, "26": 26, "27": 27, "28": 28, "29": 29, "30": 30, "31": 31, "32": 32, "33": 33, "34": 34, "35": 35, "36": 36, "37": 37, "38": 38, "39": 39, "40": 40, "41": 41, "42": 42, "43": 43, "44": 44, "45": 45, "46": 46, "47": 47, "48": 48, "49": 49, "50": 50, "51": 51, "52": 52, "53": 53, "54": 54, "55": 55, "56": 56, "57": 57, "58": 58, "59": 59, "60": 60, "61": 61, "62": 62, "63": 63, "64": 64, "65": 65, "66": 66, "67": 67, "68": 68, "69": 69, "70": 70, "71": 71, "72": 72, "73": 73, "74": 74, "75": 75} - -# category_map = {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "10": 10, "11": 11, "13": 12, "14": 13, "15": 14, "16": 15, "17": 16, "18": 17, "19": 18, "20": 19, "21": 20, "22": 21, "23": 22, "24": 23, "25": 24, "27": 25, "28": 26, "31": 27, "32": 28, "33": 29, "34": 30, "35": 31, "36": 32, "37": 33, "38": 34, "39": 35, "40": 36, "41": 37, "42": 38, "43": 39, "44": 40, "46": 41, "47": 42, "48": 43, "49": 44, "50": 45, "51": 46, "52": 47, "53": 48, "54": 49, "55": 50, "56": 51, "57": 52, "58": 53, "59": 54, "60": 55, "61": 56, "62": 57, "63": 58, "64": 59, "65": 60, "67": 61, "70": 62, "72": 63, "73": 64, "74": 65, "75": 66, "76": 67, "77": 68, "78": 69, "79": 70, "80": 71, "81": 72, "82": 73, "84": 74, "85": 75, "86": 76, "87": 77, "88": 78, "89": 79, "90": 80} - -class CoCoDataset(data.Dataset): - def __init__(self, image_dir, anno_path, input_transform=None, - labels_path=None, - used_category=-1): - self.coco = dset.CocoDetection(root=image_dir, annFile=anno_path) - # with open('./data/coco/category.json','r') as load_category: - # self.category_map = json.load(load_category) - self.category_map = category_map - self.input_transform = input_transform - self.labels_path = labels_path - self.used_category = used_category - - self.labels = [] - if os.path.exists(self.labels_path): - self.labels = np.load(self.labels_path).astype(np.float64) - self.labels = (self.labels > 0).astype(np.float64) - else: - print("No preprocessed label file found in {}.".format(self.labels_path)) - l = len(self.coco) - for i in tqdm(range(l)): - item = self.coco[i] - # print(i) - categories = self.getCategoryList(item[1]) - label = self.getLabelVector(categories) - self.labels.append(label) - self.save_datalabels(labels_path) - # import ipdb; ipdb.set_trace() - - def __getitem__(self, index): - input = self.coco[index][0] - if self.input_transform: - input = self.input_transform(input) - return input, self.labels[index] - - - def getCategoryList(self, item): - categories = set() - for t in item: - categories.add(t['category_id']) - return list(categories) - - def 
getLabelVector(self, categories): - label = np.zeros(76) - # label_num = len(categories) - for c in categories: - index = self.category_map[str(c)]-1 - label[index] = 1.0 # / label_num - return label - - def __len__(self): - return len(self.coco) - - def save_datalabels(self, outpath): - """ - Save datalabels to disk. - For faster loading next time. - """ - os.makedirs(os.path.dirname(outpath), exist_ok=True) - labels = np.array(self.labels) - np.save(outpath, labels) - - diff --git a/spaces/cymic/Waifu_Diffusion_Webui/modules/styles.py b/spaces/cymic/Waifu_Diffusion_Webui/modules/styles.py deleted file mode 100644 index fd054e03d74cdbdecc2bc95ad0db43e811f392d4..0000000000000000000000000000000000000000 --- a/spaces/cymic/Waifu_Diffusion_Webui/modules/styles.py +++ /dev/null @@ -1,92 +0,0 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - -import csv -import os -import os.path -import typing -import collections.abc as abc -import tempfile -import shutil - -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - - -class PromptStyle(typing.NamedTuple): - name: str - prompt: str - negative_prompt: str - - -def merge_prompts(style_prompt: str, prompt: str) -> str: - if "{prompt}" in style_prompt: - res = style_prompt.replace("{prompt}", prompt) - else: - parts = filter(None, (prompt.strip(), style_prompt.strip())) - res = ", ".join(parts) - - return res - - -def apply_styles_to_prompt(prompt, styles): - for style in styles: - prompt = merge_prompts(style, prompt) - - return prompt - - -class StyleDatabase: - def __init__(self, path: str): - self.no_style = PromptStyle("None", "", "") - self.styles = {"None": self.no_style} - - if not os.path.exists(path): - return - - with open(path, "r", encoding="utf8", newline='') as file: - reader = csv.DictReader(file) - for row in reader: - # Support loading old CSV format with "name, text"-columns - prompt = row["prompt"] if "prompt" in row else row["text"] - negative_prompt = row.get("negative_prompt", "") - self.styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt) - - def get_style_prompts(self, styles): - return [self.styles.get(x, self.no_style).prompt for x in styles] - - def get_negative_style_prompts(self, styles): - return [self.styles.get(x, self.no_style).negative_prompt for x in styles] - - def apply_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).prompt for x in styles]) - - def apply_negative_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) - - def apply_styles(self, p: StableDiffusionProcessing) -> None: - if isinstance(p.prompt, list): - p.prompt = [self.apply_styles_to_prompt(prompt, p.styles) for prompt in p.prompt] - else: - p.prompt = self.apply_styles_to_prompt(p.prompt, p.styles) - - if isinstance(p.negative_prompt, list): - p.negative_prompt = [self.apply_negative_styles_to_prompt(prompt, p.styles) for prompt in p.negative_prompt] - else: - p.negative_prompt = self.apply_negative_styles_to_prompt(p.negative_prompt, p.styles) - - def save_styles(self, path: str) -> None: - # Write to temporary file first, so we don't nuke the file if something goes wrong - fd, temp_path = tempfile.mkstemp(".csv") - with os.fdopen(fd, "w", encoding="utf8", 
newline='') as file: - # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple, - # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict() - writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) - writer.writeheader() - writer.writerows(style._asdict() for k, style in self.styles.items()) - - # Always keep a backup file around - if os.path.exists(path): - shutil.move(path, path + ".bak") - shutil.move(temp_path, path) diff --git a/spaces/damo-vilab/MS-Vid2Vid-XL-demo/Dockerfile b/spaces/damo-vilab/MS-Vid2Vid-XL-demo/Dockerfile deleted file mode 100644 index 61a0cf944564785454c00e37afc972b44300738b..0000000000000000000000000000000000000000 --- a/spaces/damo-vilab/MS-Vid2Vid-XL-demo/Dockerfile +++ /dev/null @@ -1,61 +0,0 @@ -FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04 -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y --no-install-recommends \ - git \ - git-lfs \ - wget \ - curl \ - # python build dependencies \ - build-essential \ - libssl-dev \ - zlib1g-dev \ - libbz2-dev \ - libreadline-dev \ - libsqlite3-dev \ - libncursesw5-dev \ - xz-utils \ - tk-dev \ - libxml2-dev \ - libxmlsec1-dev \ - libffi-dev \ - liblzma-dev \ - # gradio dependencies \ - ffmpeg && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -RUN useradd -m -u 1000 user -USER user -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:${PATH} -WORKDIR ${HOME}/app - -RUN curl https://pyenv.run | bash -ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH} -ARG PYTHON_VERSION=3.10.12 -RUN pyenv install ${PYTHON_VERSION} && \ - pyenv global ${PYTHON_VERSION} && \ - pyenv rehash && \ - pip install --no-cache-dir -U pip setuptools wheel - -COPY --chown=1000 ./requirements.txt /tmp/requirements.txt -RUN pip install --no-cache-dir --upgrade -r /tmp/requirements.txt - -RUN git clone https://github.com/modelscope/modelscope && \ - cd modelscope && \ - pip install -r requirements.txt && \ - pip install . && \ - cd .. && \ - rm -rf modelscope - -COPY --chown=1000 . ${HOME}/app -ENV PYTHONPATH=${HOME}/app \ - PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - SYSTEM=spaces -CMD ["python", "app.py"] diff --git a/spaces/danterivers/music-generation-samples/README.md b/spaces/danterivers/music-generation-samples/README.md deleted file mode 100644 index 8a65ad6e8800a446acca376e3699a7f9e80241be..0000000000000000000000000000000000000000 --- a/spaces/danterivers/music-generation-samples/README.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: MusicGen -python_version: '3.9' -tags: -- music generation -- language models -- LLMs -app_file: app.py -emoji: 🎵 -colorFrom: white -colorTo: blue -sdk: gradio -sdk_version: 3.34.0 -pinned: true -suggested_hardware: a10g-small -license: cc-by-nc-4.0 -duplicated_from: musicgen/MusicGen ---- -# Audiocraft -![docs badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_docs/badge.svg) -![linter badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_linter/badge.svg) -![tests badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_tests/badge.svg) - -Audiocraft is a PyTorch library for deep learning research on audio generation. At the moment, it contains the code for MusicGen, a state-of-the-art controllable text-to-music model. 
- -## MusicGen - -Audiocraft provides the code and models for MusicGen, [a simple and controllable model for music generation][arxiv]. MusicGen is a single stage auto-regressive -Transformer model trained over a 32kHz EnCodec tokenizer with 4 codebooks sampled at 50 Hz. Unlike existing methods like [MusicLM](https://arxiv.org/abs/2301.11325), MusicGen doesn't not require a self-supervised semantic representation, and it generates -all 4 codebooks in one pass. By introducing a small delay between the codebooks, we show we can predict -them in parallel, thus having only 50 auto-regressive steps per second of audio. -Check out our [sample page][musicgen_samples] or test the available demo! - - - Open In Colab - - - Open in HugginFace - -
- -## Installation -Audiocraft requires Python 3.9, PyTorch 2.0.0, and a GPU with at least 16 GB of memory (for the medium-sized model). To install Audiocraft, you can run the following: - -```shell -# Best to make sure you have torch installed first, in particular before installing xformers. -# Don't run this if you already have PyTorch installed. -pip install 'torch>=2.0' -# Then proceed to one of the following -pip install -U audiocraft # stable release -pip install -U git+https://git@github.com/facebookresearch/audiocraft#egg=audiocraft # bleeding edge -pip install -e . # or if you cloned the repo locally -``` - -## Usage -You can play with MusicGen by running the jupyter notebook at [`demo.ipynb`](./demo.ipynb) locally, or use the provided [colab notebook](https://colab.research.google.com/drive/1fxGqfg96RBUvGxZ1XXN07s3DthrKUl4-?usp=sharing). Finally, a demo is also available on the [`facebook/MusiGen` HugginFace Space](https://huggingface.co/spaces/facebook/MusicGen) (huge thanks to all the HF team for their support). - -## API - -We provide a simple API and 4 pre-trained models. The pre trained models are: -- `small`: 300M model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-small) -- `medium`: 1.5B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-medium) -- `melody`: 1.5B model, text to music and text+melody to music - [🤗 Hub](https://huggingface.co/facebook/musicgen-melody) -- `large`: 3.3B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-large) - -We observe the best trade-off between quality and compute with the `medium` or `melody` model. -In order to use MusicGen locally **you must have a GPU**. We recommend 16GB of memory, but smaller -GPUs will be able to generate short sequences, or longer sequences with the `small` model. - -**Note**: Please make sure to have [ffmpeg](https://ffmpeg.org/download.html) installed when using newer version of `torchaudio`. -You can install it with: -``` -apt get install ffmpeg -``` - -See after a quick example for using the API. - -```python -import torchaudio -from audiocraft.models import MusicGen -from audiocraft.data.audio import audio_write - -model = MusicGen.get_pretrained('melody') -model.set_generation_params(duration=8) # generate 8 seconds. -wav = model.generate_unconditional(4) # generates 4 unconditional audio samples -descriptions = ['happy rock', 'energetic EDM', 'sad jazz'] -wav = model.generate(descriptions) # generates 3 samples. - -melody, sr = torchaudio.load('./assets/bach.mp3') -# generates using the melody from the given audio and the provided descriptions. -wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr) - -for idx, one_wav in enumerate(wav): - # Will save under {idx}.wav, with loudness normalization at -14 db LUFS. - audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness") -``` - - -## Model Card - -See [the model card page](./MODEL_CARD.md). - -## FAQ - -#### Will the training code be released? - -Yes. We will soon release the training code for MusicGen and EnCodec. - - -## Citation -``` -@article{copet2023simple, - title={Simple and Controllable Music Generation}, - author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez}, - year={2023}, - journal={arXiv preprint arXiv:2306.05284}, -} -``` - -## License -* The code in this repository is released under the MIT license as found in the [LICENSE file](LICENSE). 
-* The weights in this repository are released under the CC-BY-NC 4.0 license as found in the [LICENSE_weights file](LICENSE_weights). - -[arxiv]: https://arxiv.org/abs/2306.05284 -[musicgen_samples]: https://ai.honu.io/papers/musicgen/ diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/feaLib/lexer.c b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/feaLib/lexer.c deleted file mode 100644 index 8c0167402efcfbd798a2d46743b758b2b6cd8ce0..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/feaLib/lexer.c +++ /dev/null @@ -1,16973 +0,0 @@ -/* Generated by Cython 3.0.0 */ - -/* BEGIN: Cython Metadata -{ - "distutils": { - "name": "fontTools.feaLib.lexer", - "sources": [ - "Lib/fontTools/feaLib/lexer.py" - ] - }, - "module_name": "fontTools.feaLib.lexer" -} -END: Cython Metadata */ - -#ifndef PY_SSIZE_T_CLEAN -#define PY_SSIZE_T_CLEAN -#endif /* PY_SSIZE_T_CLEAN */ -#if defined(CYTHON_LIMITED_API) && 0 - #ifndef Py_LIMITED_API - #if CYTHON_LIMITED_API+0 > 0x03030000 - #define Py_LIMITED_API CYTHON_LIMITED_API - #else - #define Py_LIMITED_API 0x03030000 - #endif - #endif -#endif - -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02070000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.7+ or Python 3.3+. -#else -#define CYTHON_ABI "3_0_0" -#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI -#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "." -#define CYTHON_HEX_VERSION 0x030000F0 -#define CYTHON_FUTURE_DIVISION 1 -#include -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #define HAVE_LONG_LONG -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#if defined(GRAALVM_PYTHON) - /* For very preliminary testing purposes. Most variables are set the same as PyPy. 
- The existence of this section does not imply that anything works or is even tested */ - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 1 - #define CYTHON_COMPILING_IN_NOGIL 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC - #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 - #endif -#elif defined(PYPY_VERSION) - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #define CYTHON_COMPILING_IN_NOGIL 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) - #endif - #if PY_VERSION_HEX < 0x03090000 - #undef 
CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #endif - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1 && PYPY_VERSION_NUM >= 0x07030C00) - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC - #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 - #endif -#elif defined(CYTHON_LIMITED_API) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 1 - #define CYTHON_COMPILING_IN_GRAAL 0 - #define CYTHON_COMPILING_IN_NOGIL 0 - #undef CYTHON_CLINE_IN_TRACEBACK - #define CYTHON_CLINE_IN_TRACEBACK 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 1 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #endif - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS 1 - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 1 - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC - #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 - #endif -#elif defined(PY_NOGIL) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #define CYTHON_COMPILING_IN_NOGIL 1 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #ifndef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #undef CYTHON_FAST_THREAD_STATE - 
#define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #endif - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #define CYTHON_COMPILING_IN_NOGIL 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #ifndef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #endif - #ifndef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #ifndef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL (PY_MAJOR_VERSION < 3 || PY_VERSION_HEX >= 0x03060000 && PY_VERSION_HEX < 0x030C00A6) - #endif - #ifndef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL (PY_VERSION_HEX >= 0x030700A1) - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS 1 - #endif - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #endif - #ifndef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #endif - #if PY_VERSION_HEX < 0x030400a1 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #elif !defined(CYTHON_USE_TP_FINALIZE) - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #if PY_VERSION_HEX < 0x030600B1 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #elif !defined(CYTHON_USE_DICT_VERSIONS) - #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX < 0x030C00A5) - #endif - #if PY_VERSION_HEX < 0x030700A3 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #elif !defined(CYTHON_USE_EXC_INFO_STACK) - #define CYTHON_USE_EXC_INFO_STACK 1 - #endif - #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC - #define CYTHON_UPDATE_DESCRIPTOR_DOC 1 - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if !defined(CYTHON_VECTORCALL) -#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1) -#endif -#define CYTHON_BACKPORT_VECTORCALL (CYTHON_METH_FASTCALL && 
PY_VERSION_HEX < 0x030800B1) -#if CYTHON_USE_PYLONG_INTERNALS - #if PY_MAJOR_VERSION < 3 - #include "longintrepr.h" - #endif - #undef SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif -#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED - #if defined(__cplusplus) - /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17 - * but leads to warnings with -pedantic, since it is a C++17 feature */ - #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L) - #if __has_cpp_attribute(maybe_unused) - #define CYTHON_UNUSED [[maybe_unused]] - #endif - #endif - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_UNUSED_VAR -# if defined(__cplusplus) - template<class T> void CYTHON_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR - #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x) -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; - #endif - #endif - #if _MSC_VER < 1300 - #ifdef _WIN64 - typedef unsigned long long __pyx_uintptr_t; - #else - typedef unsigned int __pyx_uintptr_t; - #endif - #else - #ifdef _WIN64 - typedef unsigned __int64 __pyx_uintptr_t; - #else - typedef unsigned __int32 __pyx_uintptr_t; - #endif - #endif -#else - #include <stdint.h> - typedef uintptr_t __pyx_uintptr_t; -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) - /* for clang __has_cpp_attribute(fallthrough) is true even before C++17 - * but leads to warnings with -pedantic, since it is a C++17 feature */ - #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L) - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif __has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if defined(__clang__) && 
defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif -#ifdef __cplusplus - template <typename T> - struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);}; - #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL<type>::value) -#else - #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0) -#endif -#if CYTHON_COMPILING_IN_PYPY == 1 - #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x030A0000) -#else - #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000) -#endif -#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer)) - -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #elif defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_DefaultClassType PyClass_Type - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" - #define __Pyx_DefaultClassType PyType_Type -#if PY_VERSION_HEX >= 0x030B00A1 - static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, - PyObject *code, PyObject *c, PyObject* n, PyObject *v, - PyObject *fv, PyObject *cell, PyObject* fn, - PyObject *name, int fline, PyObject *lnos) { - PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL; - PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *empty=NULL; - const char *fn_cstr=NULL; - const char *name_cstr=NULL; - PyCodeObject *co=NULL, *result=NULL; - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (!(kwds=PyDict_New())) goto end; - if (!(argcount=PyLong_FromLong(a))) goto end; - if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end; - if (!(posonlyargcount=PyLong_FromLong(p))) goto end; - if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end; - if (!(kwonlyargcount=PyLong_FromLong(k))) goto end; - if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end; - if (!(nlocals=PyLong_FromLong(l))) goto end; - if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end; - if (!(stacksize=PyLong_FromLong(s))) goto end; - if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end; - if (!(flags=PyLong_FromLong(f))) goto end; - if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end; - if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end; - if 
(!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end; - if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end; - if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto end; - if (!(empty = PyTuple_New(0))) goto end; - result = (PyCodeObject*) PyObject_Call(replace, empty, kwds); - end: - Py_XDECREF((PyObject*) co); - Py_XDECREF(kwds); - Py_XDECREF(argcount); - Py_XDECREF(posonlyargcount); - Py_XDECREF(kwonlyargcount); - Py_XDECREF(nlocals); - Py_XDECREF(stacksize); - Py_XDECREF(replace); - Py_XDECREF(empty); - if (type) { - PyErr_Restore(type, value, traceback); - } - return result; - } -#elif PY_VERSION_HEX >= 0x030800B2 && !CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif -#endif -#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE) - #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type) -#else - #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type)) -#endif -#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is) - #define __Pyx_Py_Is(x, y) Py_Is(x, y) -#else - #define __Pyx_Py_Is(x, y) ((x) == (y)) -#endif -#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone) - #define __Pyx_Py_IsNone(ob) Py_IsNone(ob) -#else - #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None) -#endif -#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue) - #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob) -#else - #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True) -#endif -#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse) - #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob) -#else - #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False) -#endif -#define __Pyx_NoneAsNull(obj) (__Pyx_Py_IsNone(obj) ? 
NULL : (obj)) -#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o) -#else - #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o) -#endif -#ifndef CO_COROUTINE - #define CO_COROUTINE 0x80 -#endif -#ifndef CO_ASYNC_GENERATOR - #define CO_ASYNC_GENERATOR 0x200 -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef Py_TPFLAGS_SEQUENCE - #define Py_TPFLAGS_SEQUENCE 0 -#endif -#ifndef Py_TPFLAGS_MAPPING - #define Py_TPFLAGS_MAPPING 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_METH_FASTCALL - #define __Pyx_METH_FASTCALL METH_FASTCALL - #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast - #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords -#else - #define __Pyx_METH_FASTCALL METH_VARARGS - #define __Pyx_PyCFunction_FastCall PyCFunction - #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords -#endif -#if CYTHON_VECTORCALL - #define __pyx_vectorcallfunc vectorcallfunc - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET - #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n)) -#elif CYTHON_BACKPORT_VECTORCALL - typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args, - size_t nargsf, PyObject *kwnames); - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1)) - #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(((size_t)(n)) & ~__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)) -#else - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0 - #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n)) -#endif -#if PY_VERSION_HEX < 0x030900B1 - #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b)) - typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *); -#else - #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b) - #define __Pyx_PyCMethod PyCMethod -#endif -#ifndef METH_METHOD - #define METH_METHOD 0x200 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyThreadState_Current PyThreadState_Get() -#elif !CYTHON_FAST_THREAD_STATE - #define __Pyx_PyThreadState_Current 
PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op) -{ - void *result; - result = PyModule_GetState(op); - if (!result) - Py_FatalError("Couldn't find the module state"); - return result; -} -#endif -#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE(obj), name, func_ctype) -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name)) -#else - #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name) -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if PY_MAJOR_VERSION < 3 - #if CYTHON_COMPILING_IN_PYPY - #if PYPY_VERSION_NUM < 0x07030600 - #if defined(__cplusplus) && __cplusplus >= 201402L - [[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]] - #elif defined(__GNUC__) || defined(__clang__) - __attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))) - #elif defined(_MSC_VER) - __declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")) - #endif - static CYTHON_INLINE int PyGILState_Check(void) { - return 0; - } - #else // PYPY_VERSION_NUM < 0x07030600 - #endif // PYPY_VERSION_NUM < 0x07030600 - #else - static CYTHON_INLINE int PyGILState_Check(void) { - PyThreadState * tstate = _PyThreadState_Current; - return tstate && (tstate == PyGILState_GetThisThreadState()); - } - #endif -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B4 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) { - PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name); - if (res == NULL) PyErr_Clear(); - return res; -} -#elif PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) -#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError -#define __Pyx_PyDict_GetItemStr PyDict_GetItem -#else -static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) { -#if CYTHON_COMPILING_IN_PYPY - return PyDict_GetItem(dict, name); -#else - PyDictEntry *ep; - PyDictObject *mp = (PyDictObject*) dict; - long hash = ((PyStringObject *) name)->ob_shash; - assert(hash != -1); - ep = (mp->ma_lookup)(mp, name, hash); - if (ep == NULL) { - return NULL; - } - return ep->me_value; -#endif -} -#define __Pyx_PyDict_GetItemStr PyDict_GetItem -#endif -#if CYTHON_USE_TYPE_SLOTS - #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags) - #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0) - #define __Pyx_PyObject_GetIterNextFunc(obj) (Py_TYPE(obj)->tp_iternext) -#else - #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp)) - #define __Pyx_PyType_HasFeature(type, feature) PyType_HasFeature(type, feature) - #define __Pyx_PyObject_GetIterNextFunc(obj) PyIter_Next -#endif -#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000 -#define __Pyx_PyHeapTypeObject_GC_Del(obj) {\ - PyTypeObject *type = Py_TYPE(obj);\ - assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\ - PyObject_GC_Del(obj);\ - Py_DECREF(type);\ -} -#else -#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define CYTHON_PEP393_ENABLED 1 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111U) - #define __Pyx_PyUnicode_KIND(u) ((void)u, (0)) - #define __Pyx_PyUnicode_DATA(u) ((void*)u) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i)) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u)) -#elif PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #if PY_VERSION_HEX >= 0x030C0000 - #define __Pyx_PyUnicode_READY(op) (0) - #else - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #endif - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u)) - #define __Pyx_PyUnicode_DATA(u) 
PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, (Py_UCS4) ch) - #if PY_VERSION_HEX >= 0x030C0000 - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #else - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #endif - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535U : 1114111U) - #define __Pyx_PyUnicode_KIND(u) ((int)sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = (Py_UNICODE) ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #if !defined(PyUnicode_DecodeUnicodeEscape) - #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors) - #endif - #if !defined(PyUnicode_Contains) || (PY_MAJOR_VERSION == 2 && PYPY_VERSION_NUM < 0x07030500) - #undef PyUnicode_Contains - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) - #endif - #if !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) - #endif - #if !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) - #endif -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#if CYTHON_COMPILING_IN_CPYTHON - #define __Pyx_PySequence_ListKeepNew(obj)\ - (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? __Pyx_NewRef(obj) : PySequence_List(obj)) -#else - #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define __Pyx_Py3Int_Check(op) PyLong_Check(op) - #define __Pyx_Py3Int_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#else - #define __Pyx_Py3Int_Check(op) (PyLong_Check(op) || PyInt_Check(op)) - #define __Pyx_Py3Int_CheckExact(op) (PyLong_CheckExact(op) || PyInt_CheckExact(op)) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) 
((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - -#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) - #if !defined(_USE_MATH_DEFINES) - #define _USE_MATH_DEFINES - #endif -#endif -#include <math.h> -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - -#define __PYX_MARK_ERR_POS(f_index, lineno) \ - { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } -#define __PYX_ERR(f_index, lineno, Ln_error) \ - { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } - -#ifdef CYTHON_EXTERN_C - #undef __PYX_EXTERN_C - #define __PYX_EXTERN_C CYTHON_EXTERN_C -#elif defined(__PYX_EXTERN_C) - #ifdef _MSC_VER - #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.") - #else - #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead. - #endif -#else - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__fontTools__feaLib__lexer -#define __PYX_HAVE_API__fontTools__feaLib__lexer -/* Early includes */ -#ifdef _OPENMP -#include <omp.h> -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if defined (__cplusplus) && __cplusplus >= 201103L - #include <cstdlib> - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - #define __Pyx_sst_abs(value) 
__builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? -value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const wchar_t *u) -{ - const wchar_t *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#else -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) -{ - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#endif -#define __Pyx_PyUnicode_FromOrdinal(o) PyUnicode_FromOrdinal((int)o) -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#if CYTHON_USE_PYLONG_INTERNALS - #if PY_VERSION_HEX >= 0x030C00A7 - #ifndef _PyLong_SIGN_MASK - #define _PyLong_SIGN_MASK 3 - #endif - #ifndef _PyLong_NON_SIZE_BITS - #define _PyLong_NON_SIZE_BITS 3 - #endif - #define __Pyx_PyLong_Sign(x) (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK) - #define __Pyx_PyLong_IsNeg(x) ((__Pyx_PyLong_Sign(x) & 2) != 0) - #define __Pyx_PyLong_IsNonNeg(x) (!__Pyx_PyLong_IsNeg(x)) - #define __Pyx_PyLong_IsZero(x) (__Pyx_PyLong_Sign(x) & 1) - #define __Pyx_PyLong_IsPos(x) (__Pyx_PyLong_Sign(x) == 0) - #define __Pyx_PyLong_CompactValueUnsigned(x) (__Pyx_PyLong_Digits(x)[0]) - #define __Pyx_PyLong_DigitCount(x) ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS)) - #define __Pyx_PyLong_SignedDigitCount(x)\ - ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x)) - #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue) - #define __Pyx_PyLong_IsCompact(x) PyUnstable_Long_IsCompact((PyLongObject*) x) - #define __Pyx_PyLong_CompactValue(x) PyUnstable_Long_CompactValue((PyLongObject*) x) - #else - #define __Pyx_PyLong_IsCompact(x) (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS)) - #define __Pyx_PyLong_CompactValue(x) ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0]) - #endif - typedef Py_ssize_t __Pyx_compact_pylong; - typedef size_t __Pyx_compact_upylong; - #else // Py < 3.12 - #define __Pyx_PyLong_IsNeg(x) (Py_SIZE(x) < 0) - #define __Pyx_PyLong_IsNonNeg(x) (Py_SIZE(x) >= 0) - #define __Pyx_PyLong_IsZero(x) (Py_SIZE(x) == 0) - #define __Pyx_PyLong_IsPos(x) (Py_SIZE(x) > 0) - #define __Pyx_PyLong_CompactValueUnsigned(x) ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0]) - #define __Pyx_PyLong_DigitCount(x) __Pyx_sst_abs(Py_SIZE(x)) - #define __Pyx_PyLong_SignedDigitCount(x) Py_SIZE(x) - #define __Pyx_PyLong_IsCompact(x) (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1) - #define __Pyx_PyLong_CompactValue(x)\ - ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? 
-(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0])) - typedef sdigit __Pyx_compact_pylong; - typedef digit __Pyx_compact_upylong; - #endif - #if PY_VERSION_HEX >= 0x030C00A5 - #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->long_value.ob_digit) - #else - #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->ob_digit) - #endif -#endif -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = (char) c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* __PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ -static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -#if !CYTHON_USE_MODULE_STATE -static PyObject 
*__pyx_m = NULL; -#endif -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm = __FILE__; -static const char *__pyx_filename; - -/* #### Code section: filename_table ### */ - -static const char *__pyx_f[] = { - "Lib/fontTools/feaLib/lexer.py", -}; -/* #### Code section: utility_code_proto_before_types ### */ -/* #### Code section: numeric_typedefs ### */ -/* #### Code section: complex_type_declarations ### */ -/* #### Code section: type_declarations ### */ - -/*--- Type declarations ---*/ -/* #### Code section: utility_code_proto ### */ - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, Py_ssize_t); - void (*DECREF)(void*, PyObject*, Py_ssize_t); - void (*GOTREF)(void*, PyObject*, Py_ssize_t); - void (*GIVEREF)(void*, PyObject*, Py_ssize_t); - void* (*SetupContext)(const char*, Py_ssize_t, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ - } - #define __Pyx_RefNannyFinishContextNogil() {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __Pyx_RefNannyFinishContext();\ - PyGILState_Release(__pyx_gilstate_save);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__)) - #define __Pyx_RefNannyFinishContextNogil() __Pyx_RefNannyFinishContext() -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_XINCREF(r) do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContextNogil() - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define 
__Pyx_Py_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; Py_XDECREF(tmp);\ - } while (0) -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#if PY_VERSION_HEX >= 0x030C00A6 -#define __Pyx_PyErr_Occurred() (__pyx_tstate->current_exception != NULL) -#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->current_exception ? (PyObject*) Py_TYPE(__pyx_tstate->current_exception) : (PyObject*) NULL) -#else -#define __Pyx_PyErr_Occurred() (__pyx_tstate->curexc_type != NULL) -#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->curexc_type) -#endif -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() (PyErr_Occurred() != NULL) -#define __Pyx_PyErr_CurrentExceptionType() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6 -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) 
PyObject_GetAttr(o,n) -#endif - -/* PyObjectGetAttrStrNoError.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* TupleAndListFromArray.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n); -static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n); -#endif - -/* IncludeStringH.proto */ -#include <string.h> - -/* BytesEquals.proto */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); - -/* UnicodeEquals.proto */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); - -/* fastcall.proto */ -#define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i) -#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds) -#define __Pyx_KwValues_VARARGS(args, nargs) NULL -#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s) -#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw) -#if CYTHON_METH_FASTCALL - #define __Pyx_Arg_FASTCALL(args, i) args[i] - #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds) - #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs)) - static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s); - #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) -#else - #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS - #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS - #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS - #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS - #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS -#endif -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start) -#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start) -#else -#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop) -#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop) -#endif - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues, - PyObject **argnames[], - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, - const char* function_name); - -/* PyObjectSetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) -static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); -#else -#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) -#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) -#endif - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = 
__PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) do {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} while(0) -#define __Pyx_GetModuleGlobalNameUncached(var, name) do {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} while(0) -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#if !CYTHON_VECTORCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif -#if !CYTHON_VECTORCALL -#if PY_VERSION_HEX >= 0x03080000 - #include "frameobject.h" -#if PY_VERSION_HEX >= 0x030b00a6 - #ifndef Py_BUILD_CORE - #define Py_BUILD_CORE 1 - #endif - #include "internal/pycore_frame.h" -#endif - #define __Pxy_PyFrame_Initialize_Offsets() - #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus) -#else - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame)\ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif -#endif -#endif - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON 
-static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* PyObjectCallMethO.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); -#endif - -/* PyObjectFastCall.proto */ -#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL) -static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs); - -/* RaiseTooManyValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -/* RaiseNeedMoreValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -/* IterFinish.proto */ -static CYTHON_INLINE int __Pyx_IterFinish(void); - -/* UnpackItemEndCheck.proto */ -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) -#endif - -/* PyObjectCallNoArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); - -/* RaiseException.proto */ -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -/* GetItemInt.proto */ -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* PyObjectCallOneArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* SliceObject.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( - PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** py_start, PyObject** py_stop, PyObject** py_slice, - int has_cstart, int has_cstop, int wraparound); - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_SubtractObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) -#endif - -/* PySequenceContains.proto */ -static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { - int result = PySequence_Contains(seq, item); - return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); -} - -/* PyUnicodeContains.proto */ -static CYTHON_INLINE int __Pyx_PyUnicode_ContainsTF(PyObject* substring, PyObject* text, int eq) { - int result = PyUnicode_Contains(text, substring); - return unlikely(result < 0) ? 
result : (result == (eq == Py_EQ)); -} - -/* pybytes_as_double.proto */ -static double __Pyx_SlowPyString_AsDouble(PyObject *obj); -static double __Pyx__PyBytes_AsDouble(PyObject *obj, const char* start, Py_ssize_t length); -static CYTHON_INLINE double __Pyx_PyBytes_AsDouble(PyObject *obj) { - return __Pyx__PyBytes_AsDouble(obj, PyBytes_AS_STRING(obj), PyBytes_GET_SIZE(obj)); -} -static CYTHON_INLINE double __Pyx_PyByteArray_AsDouble(PyObject *obj) { - return __Pyx__PyBytes_AsDouble(obj, PyByteArray_AS_STRING(obj), PyByteArray_GET_SIZE(obj)); -} - -/* pyunicode_as_double.proto */ -#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY -static const char* __Pyx__PyUnicode_AsDouble_Copy(const void* data, const int kind, char* buffer, Py_ssize_t start, Py_ssize_t end) { - int last_was_punctuation; - Py_ssize_t i; - last_was_punctuation = 1; - for (i=start; i <= end; i++) { - Py_UCS4 chr = PyUnicode_READ(kind, data, i); - int is_punctuation = (chr == '_') | (chr == '.'); - *buffer = (char)chr; - buffer += (chr != '_'); - if (unlikely(chr > 127)) goto parse_failure; - if (unlikely(last_was_punctuation & is_punctuation)) goto parse_failure; - last_was_punctuation = is_punctuation; - } - if (unlikely(last_was_punctuation)) goto parse_failure; - *buffer = '\0'; - return buffer; -parse_failure: - return NULL; -} -static double __Pyx__PyUnicode_AsDouble_inf_nan(const void* data, int kind, Py_ssize_t start, Py_ssize_t length) { - int matches = 1; - Py_UCS4 chr; - Py_UCS4 sign = PyUnicode_READ(kind, data, start); - int is_signed = (sign == '-') | (sign == '+'); - start += is_signed; - length -= is_signed; - switch (PyUnicode_READ(kind, data, start)) { - #ifdef Py_NAN - case 'n': - case 'N': - if (unlikely(length != 3)) goto parse_failure; - chr = PyUnicode_READ(kind, data, start+1); - matches &= (chr == 'a') | (chr == 'A'); - chr = PyUnicode_READ(kind, data, start+2); - matches &= (chr == 'n') | (chr == 'N'); - if (unlikely(!matches)) goto parse_failure; - return (sign == '-') ? -Py_NAN : Py_NAN; - #endif - case 'i': - case 'I': - if (unlikely(length < 3)) goto parse_failure; - chr = PyUnicode_READ(kind, data, start+1); - matches &= (chr == 'n') | (chr == 'N'); - chr = PyUnicode_READ(kind, data, start+2); - matches &= (chr == 'f') | (chr == 'F'); - if (likely(length == 3 && matches)) - return (sign == '-') ? -Py_HUGE_VAL : Py_HUGE_VAL; - if (unlikely(length != 8)) goto parse_failure; - chr = PyUnicode_READ(kind, data, start+3); - matches &= (chr == 'i') | (chr == 'I'); - chr = PyUnicode_READ(kind, data, start+4); - matches &= (chr == 'n') | (chr == 'N'); - chr = PyUnicode_READ(kind, data, start+5); - matches &= (chr == 'i') | (chr == 'I'); - chr = PyUnicode_READ(kind, data, start+6); - matches &= (chr == 't') | (chr == 'T'); - chr = PyUnicode_READ(kind, data, start+7); - matches &= (chr == 'y') | (chr == 'Y'); - if (unlikely(!matches)) goto parse_failure; - return (sign == '-') ? 
-Py_HUGE_VAL : Py_HUGE_VAL; - case '.': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': - break; - default: - goto parse_failure; - } - return 0.0; -parse_failure: - return -1.0; -} -static double __Pyx_PyUnicode_AsDouble_WithSpaces(PyObject *obj) { - double value; - const char *last; - char *end; - Py_ssize_t start, length = PyUnicode_GET_LENGTH(obj); - const int kind = PyUnicode_KIND(obj); - const void* data = PyUnicode_DATA(obj); - start = 0; - while (Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, start))) - start++; - while (start < length - 1 && Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, length - 1))) - length--; - length -= start; - if (unlikely(length <= 0)) goto fallback; - value = __Pyx__PyUnicode_AsDouble_inf_nan(data, kind, start, length); - if (unlikely(value == -1.0)) goto fallback; - if (value != 0.0) return value; - if (length < 40) { - char number[40]; - last = __Pyx__PyUnicode_AsDouble_Copy(data, kind, number, start, start + length); - if (unlikely(!last)) goto fallback; - value = PyOS_string_to_double(number, &end, NULL); - } else { - char *number = (char*) PyMem_Malloc((length + 1) * sizeof(char)); - if (unlikely(!number)) goto fallback; - last = __Pyx__PyUnicode_AsDouble_Copy(data, kind, number, start, start + length); - if (unlikely(!last)) { - PyMem_Free(number); - goto fallback; - } - value = PyOS_string_to_double(number, &end, NULL); - PyMem_Free(number); - } - if (likely(end == last) || (value == (double)-1 && PyErr_Occurred())) { - return value; - } -fallback: - return __Pyx_SlowPyString_AsDouble(obj); -} -#endif -static CYTHON_INLINE double __Pyx_PyUnicode_AsDouble(PyObject *obj) { -#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY - if (unlikely(__Pyx_PyUnicode_READY(obj) == -1)) - return (double)-1; - if (likely(PyUnicode_IS_ASCII(obj))) { - const char *s; - Py_ssize_t length; - s = PyUnicode_AsUTF8AndSize(obj, &length); - return __Pyx__PyBytes_AsDouble(obj, s, length); - } - return __Pyx_PyUnicode_AsDouble_WithSpaces(obj); -#else - return __Pyx_SlowPyString_AsDouble(obj); -#endif -} - -/* pynumber_float.proto */ -static CYTHON_INLINE PyObject* __Pyx__PyNumber_Float(PyObject* obj); -#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : __Pyx__PyNumber_Float(x)) - -/* IterNext.proto */ -#define __Pyx_PyIter_Next(obj) __Pyx_PyIter_Next2(obj, NULL) -static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject *, PyObject *); - -/* GetTopmostException.proto */ -#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/* SaveResetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -#else -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - -/* GetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* PyObjectGetMethod.proto */ -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); - -/* PyObjectCallMethod0.proto */ -static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); - -/* pop.proto */ -static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L); -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L); -#define __Pyx_PyObject_Pop(L) (likely(PyList_CheckExact(L)) ?\ - __Pyx_PyList_Pop(L) : __Pyx__PyObject_Pop(L)) -#else -#define __Pyx_PyList_Pop(L) __Pyx__PyObject_Pop(L) -#define __Pyx_PyObject_Pop(L) __Pyx__PyObject_Pop(L) -#endif - -/* UnpackUnboundCMethod.proto */ -typedef struct { - PyObject *type; - PyObject **method_name; - PyCFunction func; - PyObject *method; - int flag; -} __Pyx_CachedCFunction; - -/* CallUnboundCMethod0.proto */ -static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_CallUnboundCMethod0(cfunc, self)\ - (likely((cfunc)->func) ?\ - (likely((cfunc)->flag == METH_NOARGS) ? (*((cfunc)->func))(self, NULL) :\ - (PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ?\ - (PY_VERSION_HEX >= 0x030700A0 ?\ - (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0) :\ - (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0, NULL)) :\ - (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ?\ - (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0, NULL) :\ - (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, __pyx_empty_tuple, NULL)) :\ - ((cfunc)->flag == METH_VARARGS ? 
(*((cfunc)->func))(self, __pyx_empty_tuple) :\ - __Pyx__CallUnboundCMethod0(cfunc, self)))))) :\ - __Pyx__CallUnboundCMethod0(cfunc, self)) -#else -#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self) -#endif - -/* ListAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) -#endif - -/* PyObjectCall2Args.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); - -/* PyObjectCallMethod1.proto */ -static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); - -/* append.proto */ -static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2)) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyErr_ExceptionMatches2(err1, err2) __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2) -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -/* SwapException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* GetAttr.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); - -/* HasAttr.proto */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); - -/* GetAttr3.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* ImportFrom.proto */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); - -/* ImportDottedModule.proto */ -static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple); -#if PY_MAJOR_VERSION >= 3 -static PyObject 
*__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple); -#endif - -/* Py3UpdateBases.proto */ -static PyObject* __Pyx_PEP560_update_bases(PyObject *bases); - -/* CalculateMetaclass.proto */ -static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); - -/* SetNameInClass.proto */ -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 -#define __Pyx_SetNameInClass(ns, name, value)\ - (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) -#elif CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_SetNameInClass(ns, name, value)\ - (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) -#else -#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) -#endif - -/* IncludeStructmemberH.proto */ -#include <structmember.h> - -/* FixUpExtensionType.proto */ -#if CYTHON_USE_TYPE_SPECS -static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type); -#endif - -/* FetchSharedCythonModule.proto */ -static PyObject *__Pyx_FetchSharedCythonABIModule(void); - -/* FetchCommonType.proto */ -#if !CYTHON_USE_TYPE_SPECS -static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); -#else -static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases); -#endif - -/* PyMethodNew.proto */ -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { - CYTHON_UNUSED_VAR(typ); - if (!self) - return __Pyx_NewRef(func); - return PyMethod_New(func, self); -} -#else - #define __Pyx_PyMethod_New PyMethod_New -#endif - -/* PyVectorcallFastCallDict.proto */ -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw); -#endif - -/* CythonFunctionShared.proto */ -#define __Pyx_CyFunction_USED -#define __Pyx_CYFUNCTION_STATICMETHOD 0x01 -#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 -#define __Pyx_CYFUNCTION_CCLASS 0x04 -#define __Pyx_CYFUNCTION_COROUTINE 0x08 -#define __Pyx_CyFunction_GetClosure(f)\ - (((__pyx_CyFunctionObject *) (f))->func_closure) -#if PY_VERSION_HEX < 0x030900B1 - #define __Pyx_CyFunction_GetClassObj(f)\ - (((__pyx_CyFunctionObject *) (f))->func_classobj) -#else - #define __Pyx_CyFunction_GetClassObj(f)\ - ((PyObject*) ((PyCMethodObject *) (f))->mm_class) -#endif -#define __Pyx_CyFunction_SetClassObj(f, classobj)\ - __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj)) -#define __Pyx_CyFunction_Defaults(type, f)\ - ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) -#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ - ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) -typedef struct { -#if PY_VERSION_HEX < 0x030900B1 - PyCFunctionObject func; -#else - PyCMethodObject func; -#endif -#if CYTHON_BACKPORT_VECTORCALL - __pyx_vectorcallfunc func_vectorcall; -#endif -#if PY_VERSION_HEX < 0x030500A0 - PyObject *func_weakreflist; -#endif - PyObject *func_dict; - PyObject *func_name; - PyObject *func_qualname; - PyObject *func_doc; - PyObject *func_globals; - PyObject *func_code; - PyObject *func_closure; -#if PY_VERSION_HEX < 0x030900B1 - PyObject *func_classobj; -#endif - void *defaults; - int defaults_pyobjects; - size_t defaults_size; // used by FusedFunction for copying defaults - int flags; - PyObject *defaults_tuple; - PyObject 
*defaults_kwdict; - PyObject *(*defaults_getter)(PyObject *); - PyObject *func_annotations; - PyObject *func_is_coroutine; -} __pyx_CyFunctionObject; -#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_CyFunctionType) -#define __Pyx_IsCyOrPyCFunction(obj) __Pyx_TypeCheck2(obj, __pyx_CyFunctionType, &PyCFunction_Type) -#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CyFunctionType) -static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, - int flags, PyObject* qualname, - PyObject *closure, - PyObject *module, PyObject *globals, - PyObject* code); -static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj); -static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, - size_t size, - int pyobjects); -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, - PyObject *tuple); -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, - PyObject *dict); -static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, - PyObject *dict); -static int __pyx_CyFunction_init(PyObject *module); -#if CYTHON_METH_FASTCALL -static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -#if CYTHON_BACKPORT_VECTORCALL -#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall) -#else -#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall) -#endif -#endif - -/* CythonFunction.proto */ -static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, - int flags, PyObject* qualname, - PyObject *closure, - PyObject *module, PyObject *globals, - PyObject* code); - -/* PyObjectLookupSpecial.proto */ -#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS -#define __Pyx_PyObject_LookupSpecialNoError(obj, attr_name) __Pyx__PyObject_LookupSpecial(obj, attr_name, 0) -#define __Pyx_PyObject_LookupSpecial(obj, attr_name) __Pyx__PyObject_LookupSpecial(obj, attr_name, 1) -static CYTHON_INLINE PyObject* __Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error); -#else -#define __Pyx_PyObject_LookupSpecialNoError(o,n) __Pyx_PyObject_GetAttrStrNoError(o,n) -#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n) -#endif - -/* Py3ClassCreate.proto */ -static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, - PyObject *mkw, PyObject *modname, PyObject *doc); -static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, - PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -#if !CYTHON_COMPILING_IN_LIMITED_API -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); -#endif - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -/* GCCDiagnostics.proto */ -#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) -#define __Pyx_HAS_GCC_DIAGNOSTIC -#endif - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* FormatTypeName.proto */ -#if CYTHON_COMPILING_IN_LIMITED_API -typedef PyObject *__Pyx_TypeName; -#define __Pyx_FMT_TYPENAME "%U" -static __Pyx_TypeName __Pyx_PyType_GetName(PyTypeObject* tp); -#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj) -#else -typedef const char *__Pyx_TypeName; -#define __Pyx_FMT_TYPENAME "%.200s" -#define __Pyx_PyType_GetName(tp) ((tp)->tp_name) -#define __Pyx_DECREF_TypeName(obj) -#endif - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - -/* #### Code section: module_declarations ### */ - -/* Module declarations from "cython" */ - -/* Module declarations from "fontTools.feaLib.lexer" */ -/* #### Code section: typeinfo ### */ -/* #### Code section: before_global_var ### */ -#define __Pyx_MODULE_NAME "fontTools.feaLib.lexer" -extern int __pyx_module_is_main_fontTools__feaLib__lexer; -int __pyx_module_is_main_fontTools__feaLib__lexer = 0; - -/* Implementation of "fontTools.feaLib.lexer" */ -/* #### Code section: global_var ### */ -static PyObject *__pyx_builtin_ImportError; -static PyObject *__pyx_builtin_object; -static PyObject *__pyx_builtin_staticmethod; -static PyObject *__pyx_builtin_StopIteration; -static PyObject *__pyx_builtin_open; -/* #### Code section: string_decls ### */ -static const char __pyx_k_[] = "\n"; -static const char __pyx_k_0[] = "0"; -static const char __pyx_k_p[] = "p"; -static const char __pyx_k_r[] = "r"; -static const char __pyx_k_s[] = "}\\s*"; -static const char __pyx_k__2[] = "\r"; -static const char __pyx_k__3[] = "#"; -static const char __pyx_k__4[] = "("; -static const char __pyx_k__5[] = ")"; -static const char __pyx_k__6[] = "\\"; -static const char __pyx_k__7[] = "@"; -static const char __pyx_k__8[] = "."; -static const char __pyx_k__9[] = "-"; -static const char __pyx_k_os[] = "os"; -static const char __pyx_k_re[] = "re"; -static const char __pyx_k_xX[] = "xX"; -static const char __pyx_k_CID[] = "CID"; -static const char __pyx_k__10[] = "\""; -static const char __pyx_k__11[] = "[\r\n]"; -static const char __pyx_k__12[] = ""; -static const char __pyx_k__13[] = "*"; -static const char __pyx_k__16[] = " \t"; -static const char __pyx_k__17[] = "\r\n"; -static const char 
__pyx_k__18[] = ",;:-+'{}[]<>()="; -static const char __pyx_k__19[] = "_+*:.^~!\\"; -static const char __pyx_k__20[] = "_.+*:^~!/-"; -static const char __pyx_k__51[] = "?"; -static const char __pyx_k_doc[] = "__doc__"; -static const char __pyx_k_err[] = "err"; -static const char __pyx_k_pop[] = "pop"; -static const char __pyx_k_pos[] = "pos_"; -static const char __pyx_k_s_2[] = "\\s*;"; -static const char __pyx_k_sub[] = "sub"; -static const char __pyx_k_tag[] = "tag"; -static const char __pyx_k_NAME[] = "NAME"; -static const char __pyx_k_data[] = "data"; -static const char __pyx_k_dict[] = "__dict__"; -static const char __pyx_k_init[] = "__init__"; -static const char __pyx_k_iter[] = "__iter__"; -static const char __pyx_k_join[] = "join"; -static const char __pyx_k_line[] = "line_"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_mode[] = "mode_"; -static const char __pyx_k_name[] = "name"; -static const char __pyx_k_next[] = "__next__"; -static const char __pyx_k_open[] = "open"; -static const char __pyx_k_path[] = "path"; -static const char __pyx_k_read[] = "read"; -static const char __pyx_k_self[] = "self"; -static const char __pyx_k_spec[] = "__spec__"; -static const char __pyx_k_test[] = "__test__"; -static const char __pyx_k_text[] = "text"; -static const char __pyx_k_FLOAT[] = "FLOAT"; -static const char __pyx_k_Lexer[] = "Lexer"; -static const char __pyx_k_OCTAL[] = "OCTAL"; -static const char __pyx_k_close[] = "close"; -static const char __pyx_k_isabs[] = "isabs"; -static const char __pyx_k_lexer[] = "lexer"; -static const char __pyx_k_limit[] = "limit"; -static const char __pyx_k_match[] = "match"; -static const char __pyx_k_split[] = "split"; -static const char __pyx_k_start[] = "start"; -static const char __pyx_k_strip[] = "strip"; -static const char __pyx_k_super[] = "super"; -static const char __pyx_k_token[] = "token"; -static const char __pyx_k_utf_8[] = "utf-8"; -static const char __pyx_k_valid[] = "valid"; -static const char __pyx_k_NORMAL[] = "NORMAL"; -static const char __pyx_k_NUMBER[] = "NUMBER"; -static const char __pyx_k_STRING[] = "STRING"; -static const char __pyx_k_SYMBOL[] = "SYMBOL"; -static const char __pyx_k_append[] = "append"; -static const char __pyx_k_column[] = "column"; -static const char __pyx_k_getcwd[] = "getcwd"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_lexers[] = "lexers_"; -static const char __pyx_k_module[] = "__module__"; -static const char __pyx_k_name_2[] = "__name__"; -static const char __pyx_k_next_2[] = "next_"; -static const char __pyx_k_next_3[] = "next"; -static const char __pyx_k_object[] = "object"; -static const char __pyx_k_regexp[] = "regexp"; -static const char __pyx_k_string[] = "string"; -static const char __pyx_k_text_2[] = "text_"; -static const char __pyx_k_COMMENT[] = "COMMENT"; -static const char __pyx_k_NEWLINE[] = "NEWLINE"; -static const char __pyx_k_NUMBERS[] = "NUMBERS"; -static const char __pyx_k_closing[] = "closing"; -static const char __pyx_k_compile[] = "compile"; -static const char __pyx_k_curpath[] = "curpath"; -static const char __pyx_k_dirname[] = "dirname"; -static const char __pyx_k_fileobj[] = "fileobj"; -static const char __pyx_k_include[] = "include"; -static const char __pyx_k_prepare[] = "__prepare__"; -static const char __pyx_k_stop_at[] = "stop_at"; -static const char __pyx_k_FILENAME[] = "FILENAME"; -static const char __pyx_k_cur_char[] = "cur_char"; -static const char __pyx_k_encoding[] = "encoding"; -static const char 
__pyx_k_features[] = "<features>"; -static const char __pyx_k_filename[] = "filename"; -static const char __pyx_k_location[] = "location_"; -static const char __pyx_k_maxsplit[] = "maxsplit"; -static const char __pyx_k_qualname[] = "__qualname__"; -static const char __pyx_k_set_name[] = "__set_name__"; -static const char __pyx_k_metaclass[] = "__metaclass__"; -static const char __pyx_k_next_char[] = "next_char"; -static const char __pyx_k_scan_over[] = "scan_over_"; -static const char __pyx_k_0123456789[] = "0123456789"; -static const char __pyx_k_A_Za_z_0_9[] = "^[A-Za-z_0-9.\\-]+$"; -static const char __pyx_k_CHAR_DIGIT[] = "CHAR_DIGIT_"; -static const char __pyx_k_GLYPHCLASS[] = "GLYPHCLASS"; -static const char __pyx_k_Lexer_next[] = "Lexer.next"; -static const char __pyx_k_filename_2[] = "filename_"; -static const char __pyx_k_fname_type[] = "fname_type"; -static const char __pyx_k_glyphclass[] = "glyphclass"; -static const char __pyx_k_includeDir[] = "includeDir"; -static const char __pyx_k_line_start[] = "line_start_"; -static const char __pyx_k_location_2[] = "location"; -static const char __pyx_k_make_lexer[] = "make_lexer_"; -static const char __pyx_k_scan_until[] = "scan_until_"; -static const char __pyx_k_token_type[] = "token_type"; -static const char __pyx_k_CHAR_LETTER[] = "CHAR_LETTER_"; -static const char __pyx_k_CHAR_SYMBOL[] = "CHAR_SYMBOL_"; -static const char __pyx_k_HEXADECIMAL[] = "HEXADECIMAL"; -static const char __pyx_k_ImportError[] = "ImportError"; -static const char __pyx_k_MODE_NORMAL[] = "MODE_NORMAL_"; -static const char __pyx_k_featurefile[] = "featurefile"; -static const char __pyx_k_fname_token[] = "fname_token"; -static const char __pyx_k_mro_entries[] = "__mro_entries__"; -static const char __pyx_k_text_length[] = "text_length_"; -static const char __pyx_k_CHAR_NEWLINE[] = "CHAR_NEWLINE_"; -static const char __pyx_k_Lexer___init[] = "Lexer.__init__"; -static const char __pyx_k_Lexer___iter[] = "Lexer.__iter__"; -static const char __pyx_k_Lexer___next[] = "Lexer.__next__"; -static const char __pyx_k_Lexer_next_2[] = "Lexer.next_"; -static const char __pyx_k_file_or_path[] = "file_or_path"; -static const char __pyx_k_initializing[] = "_initializing"; -static const char __pyx_k_is_coroutine[] = "_is_coroutine"; -static const char __pyx_k_staticmethod[] = "staticmethod"; -static const char __pyx_k_CHAR_HEXDIGIT[] = "CHAR_HEXDIGIT_"; -static const char __pyx_k_MODE_FILENAME[] = "MODE_FILENAME_"; -static const char __pyx_k_RE_GLYPHCLASS[] = "RE_GLYPHCLASS"; -static const char __pyx_k_StopIteration[] = "StopIteration"; -static const char __pyx_k_class_getitem[] = "__class_getitem__"; -static const char __pyx_k_init_subclass[] = "__init_subclass__"; -static const char __pyx_k_IncludingLexer[] = "IncludingLexer"; -static const char __pyx_k_Lexer_location[] = "Lexer.location_"; -static const char __pyx_k_fname_location[] = "fname_location"; -static const char __pyx_k_ANONYMOUS_BLOCK[] = "ANONYMOUS_BLOCK"; -static const char __pyx_k_CHAR_NAME_START[] = "CHAR_NAME_START_"; -static const char __pyx_k_CHAR_WHITESPACE[] = "CHAR_WHITESPACE_"; -static const char __pyx_k_FeatureLibError[] = "FeatureLibError"; -static const char __pyx_k_Lexer_scan_over[] = "Lexer.scan_over_"; -static const char __pyx_k_featurefilepath[] = "featurefilepath"; -static const char __pyx_k_Lexer_scan_until[] = "Lexer.scan_until_"; -static const char __pyx_k_FileNotFoundError[] = "FileNotFoundError"; -static const char __pyx_k_NonIncludingLexer[] = "NonIncludingLexer"; -static const char 
__pyx_k_Expected_file_name[] = "Expected file name"; -static const char __pyx_k_FeatureLibLocation[] = "FeatureLibLocation"; -static const char __pyx_k_asyncio_coroutines[] = "asyncio.coroutines"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_IncludedFeaNotFound[] = "IncludedFeaNotFound"; -static const char __pyx_k_IncludingLexer_next[] = "IncludingLexer.next"; -static const char __pyx_k_scan_anonymous_block[] = "scan_anonymous_block"; -static const char __pyx_k_IncludingLexer___init[] = "IncludingLexer.__init__"; -static const char __pyx_k_IncludingLexer___iter[] = "IncludingLexer.__iter__"; -static const char __pyx_k_IncludingLexer___next[] = "IncludingLexer.__next__"; -static const char __pyx_k_0123456789ABCDEFabcdef[] = "0123456789ABCDEFabcdef"; -static const char __pyx_k_CHAR_NAME_CONTINUATION[] = "CHAR_NAME_CONTINUATION_"; -static const char __pyx_k_Unexpected_character_r[] = "Unexpected character: %r"; -static const char __pyx_k_fontTools_feaLib_error[] = "fontTools.feaLib.error"; -static const char __pyx_k_fontTools_feaLib_lexer[] = "fontTools.feaLib.lexer"; -static const char __pyx_k_Expected_after_file_name[] = "Expected ')' after file name"; -static const char __pyx_k_NonIncludingLexer___next[] = "NonIncludingLexer.__next__"; -static const char __pyx_k_Expected_before_file_name[] = "Expected '(' before file name"; -static const char __pyx_k_Expected_glyph_class_name[] = "Expected glyph class name"; -static const char __pyx_k_IncludingLexer_make_lexer[] = "IncludingLexer.make_lexer_"; -static const char __pyx_k_fontTools_feaLib_location[] = "fontTools.feaLib.location"; -static const char __pyx_k_Lexer_scan_anonymous_block[] = "Lexer.scan_anonymous_block"; -static const char __pyx_k_Too_many_recursive_includes[] = "Too many recursive includes"; -static const char __pyx_k_Expected_to_terminate_string[] = "Expected '\"' to terminate string"; -static const char __pyx_k_Lib_fontTools_feaLib_lexer_py[] = "Lib/fontTools/feaLib/lexer.py"; -static const char __pyx_k_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; -static const char __pyx_k_A_Lexer_that_follows_include_sta[] = "A Lexer that follows include statements.\n\n The OpenType feature file specification states that due to\n historical reasons, relative imports should be resolved in this\n order:\n\n 1. If the source font is UFO format, then relative to the UFO's\n font directory\n 2. relative to the top-level include file\n 3. 
relative to the parent include file\n\n We only support 1 (via includeDir) and 2.\n "; -static const char __pyx_k_Expected_s_to_terminate_anonymou[] = "Expected '} %s;' to terminate anonymous block"; -static const char __pyx_k_Glyph_class_names_must_consist_o[] = "Glyph class names must consist of letters, digits, underscore, period or hyphen"; -static const char __pyx_k_Glyph_class_names_must_not_be_lo[] = "Glyph class names must not be longer than 63 characters"; -static const char __pyx_k_IncludingLexer_scan_anonymous_bl[] = "IncludingLexer.scan_anonymous_block"; -static const char __pyx_k_Lexer_that_does_not_follow_inclu[] = "Lexer that does not follow `include` statements, emits them as-is."; -/* #### Code section: decls ### */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_text, PyObject *__pyx_v_filename); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_2__iter__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_4next(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_6__next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_8location_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_10next_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_valid); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_stop_at); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_tag); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_featurefile, PyObject *__pyx_v_includeDir); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_4next(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_file_or_path); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_tag); /* proto */ -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ -static __Pyx_CachedCFunction __pyx_umethod_PyList_Type_pop = {0, 0, 0, 0, 0}; -/* #### Code section: late_includes ### */ -/* #### Code section: module_state ### */ -typedef struct { - PyObject *__pyx_d; - 
PyObject *__pyx_b; - PyObject *__pyx_cython_runtime; - PyObject *__pyx_empty_tuple; - PyObject *__pyx_empty_bytes; - PyObject *__pyx_empty_unicode; - #ifdef __Pyx_CyFunction_USED - PyTypeObject *__pyx_CyFunctionType; - #endif - #ifdef __Pyx_FusedFunction_USED - PyTypeObject *__pyx_FusedFunctionType; - #endif - #ifdef __Pyx_Generator_USED - PyTypeObject *__pyx_GeneratorType; - #endif - #ifdef __Pyx_IterableCoroutine_USED - PyTypeObject *__pyx_IterableCoroutineType; - #endif - #ifdef __Pyx_Coroutine_USED - PyTypeObject *__pyx_CoroutineAwaitType; - #endif - #ifdef __Pyx_Coroutine_USED - PyTypeObject *__pyx_CoroutineType; - #endif - #if CYTHON_USE_MODULE_STATE - #endif - #if CYTHON_USE_MODULE_STATE - #endif - PyObject *__pyx_kp_u_; - PyObject *__pyx_kp_u_0; - PyObject *__pyx_kp_u_0123456789; - PyObject *__pyx_kp_u_0123456789ABCDEFabcdef; - PyObject *__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef; - PyObject *__pyx_n_s_ANONYMOUS_BLOCK; - PyObject *__pyx_n_u_ANONYMOUS_BLOCK; - PyObject *__pyx_kp_s_A_Lexer_that_follows_include_sta; - PyObject *__pyx_kp_u_A_Za_z_0_9; - PyObject *__pyx_n_s_CHAR_DIGIT; - PyObject *__pyx_n_s_CHAR_HEXDIGIT; - PyObject *__pyx_n_s_CHAR_LETTER; - PyObject *__pyx_n_s_CHAR_NAME_CONTINUATION; - PyObject *__pyx_n_s_CHAR_NAME_START; - PyObject *__pyx_n_s_CHAR_NEWLINE; - PyObject *__pyx_n_s_CHAR_SYMBOL; - PyObject *__pyx_n_s_CHAR_WHITESPACE; - PyObject *__pyx_n_s_CID; - PyObject *__pyx_n_u_CID; - PyObject *__pyx_n_s_COMMENT; - PyObject *__pyx_n_u_COMMENT; - PyObject *__pyx_kp_u_Expected_after_file_name; - PyObject *__pyx_kp_u_Expected_before_file_name; - PyObject *__pyx_kp_u_Expected_file_name; - PyObject *__pyx_kp_u_Expected_glyph_class_name; - PyObject *__pyx_kp_u_Expected_s_to_terminate_anonymou; - PyObject *__pyx_kp_u_Expected_to_terminate_string; - PyObject *__pyx_n_s_FILENAME; - PyObject *__pyx_n_u_FILENAME; - PyObject *__pyx_n_s_FLOAT; - PyObject *__pyx_n_u_FLOAT; - PyObject *__pyx_n_s_FeatureLibError; - PyObject *__pyx_n_s_FeatureLibLocation; - PyObject *__pyx_n_s_FileNotFoundError; - PyObject *__pyx_n_s_GLYPHCLASS; - PyObject *__pyx_n_u_GLYPHCLASS; - PyObject *__pyx_kp_u_Glyph_class_names_must_consist_o; - PyObject *__pyx_kp_u_Glyph_class_names_must_not_be_lo; - PyObject *__pyx_n_s_HEXADECIMAL; - PyObject *__pyx_n_u_HEXADECIMAL; - PyObject *__pyx_n_s_ImportError; - PyObject *__pyx_n_s_IncludedFeaNotFound; - PyObject *__pyx_n_s_IncludingLexer; - PyObject *__pyx_n_s_IncludingLexer___init; - PyObject *__pyx_n_s_IncludingLexer___iter; - PyObject *__pyx_n_s_IncludingLexer___next; - PyObject *__pyx_n_s_IncludingLexer_make_lexer; - PyObject *__pyx_n_s_IncludingLexer_next; - PyObject *__pyx_n_s_IncludingLexer_scan_anonymous_bl; - PyObject *__pyx_n_s_Lexer; - PyObject *__pyx_n_s_Lexer___init; - PyObject *__pyx_n_s_Lexer___iter; - PyObject *__pyx_n_s_Lexer___next; - PyObject *__pyx_n_s_Lexer_location; - PyObject *__pyx_n_s_Lexer_next; - PyObject *__pyx_n_s_Lexer_next_2; - PyObject *__pyx_n_s_Lexer_scan_anonymous_block; - PyObject *__pyx_n_s_Lexer_scan_over; - PyObject *__pyx_n_s_Lexer_scan_until; - PyObject *__pyx_kp_s_Lexer_that_does_not_follow_inclu; - PyObject *__pyx_kp_s_Lib_fontTools_feaLib_lexer_py; - PyObject *__pyx_n_s_MODE_FILENAME; - PyObject *__pyx_n_s_MODE_NORMAL; - PyObject *__pyx_n_s_NAME; - PyObject *__pyx_n_u_NAME; - PyObject *__pyx_n_s_NEWLINE; - PyObject *__pyx_n_u_NEWLINE; - PyObject *__pyx_n_u_NORMAL; - PyObject *__pyx_n_s_NUMBER; - PyObject *__pyx_n_u_NUMBER; - PyObject *__pyx_n_s_NUMBERS; - PyObject *__pyx_n_s_NonIncludingLexer; - PyObject 
*__pyx_n_s_NonIncludingLexer___next; - PyObject *__pyx_n_s_OCTAL; - PyObject *__pyx_n_u_OCTAL; - PyObject *__pyx_n_s_RE_GLYPHCLASS; - PyObject *__pyx_n_s_STRING; - PyObject *__pyx_n_u_STRING; - PyObject *__pyx_n_s_SYMBOL; - PyObject *__pyx_n_u_SYMBOL; - PyObject *__pyx_n_s_StopIteration; - PyObject *__pyx_kp_u_Too_many_recursive_includes; - PyObject *__pyx_kp_u_Unexpected_character_r; - PyObject *__pyx_kp_u__10; - PyObject *__pyx_kp_u__11; - PyObject *__pyx_kp_u__12; - PyObject *__pyx_n_s__13; - PyObject *__pyx_kp_u__16; - PyObject *__pyx_kp_u__17; - PyObject *__pyx_kp_u__18; - PyObject *__pyx_kp_u__19; - PyObject *__pyx_kp_u__2; - PyObject *__pyx_kp_u__20; - PyObject *__pyx_kp_u__3; - PyObject *__pyx_kp_u__4; - PyObject *__pyx_kp_u__5; - PyObject *__pyx_n_s__51; - PyObject *__pyx_kp_u__6; - PyObject *__pyx_kp_u__7; - PyObject *__pyx_kp_u__8; - PyObject *__pyx_kp_u__9; - PyObject *__pyx_n_s_append; - PyObject *__pyx_n_s_asyncio_coroutines; - PyObject *__pyx_n_s_class_getitem; - PyObject *__pyx_n_s_cline_in_traceback; - PyObject *__pyx_n_s_close; - PyObject *__pyx_n_s_closing; - PyObject *__pyx_n_s_column; - PyObject *__pyx_n_s_compile; - PyObject *__pyx_n_s_cur_char; - PyObject *__pyx_n_s_curpath; - PyObject *__pyx_n_s_data; - PyObject *__pyx_n_s_dict; - PyObject *__pyx_n_s_dirname; - PyObject *__pyx_n_s_doc; - PyObject *__pyx_n_s_encoding; - PyObject *__pyx_n_s_err; - PyObject *__pyx_n_s_featurefile; - PyObject *__pyx_n_s_featurefilepath; - PyObject *__pyx_kp_u_features; - PyObject *__pyx_n_s_file_or_path; - PyObject *__pyx_n_s_filename; - PyObject *__pyx_n_s_filename_2; - PyObject *__pyx_n_s_fileobj; - PyObject *__pyx_n_s_fname_location; - PyObject *__pyx_n_s_fname_token; - PyObject *__pyx_n_s_fname_type; - PyObject *__pyx_n_s_fontTools_feaLib_error; - PyObject *__pyx_n_s_fontTools_feaLib_lexer; - PyObject *__pyx_n_s_fontTools_feaLib_location; - PyObject *__pyx_n_s_getcwd; - PyObject *__pyx_n_s_glyphclass; - PyObject *__pyx_n_s_import; - PyObject *__pyx_n_u_include; - PyObject *__pyx_n_s_includeDir; - PyObject *__pyx_n_s_init; - PyObject *__pyx_n_s_init_subclass; - PyObject *__pyx_n_s_initializing; - PyObject *__pyx_n_s_is_coroutine; - PyObject *__pyx_n_s_isabs; - PyObject *__pyx_n_s_iter; - PyObject *__pyx_n_s_join; - PyObject *__pyx_n_s_lexer; - PyObject *__pyx_n_s_lexers; - PyObject *__pyx_n_s_limit; - PyObject *__pyx_n_s_line; - PyObject *__pyx_n_s_line_start; - PyObject *__pyx_n_s_location; - PyObject *__pyx_n_s_location_2; - PyObject *__pyx_n_s_main; - PyObject *__pyx_n_s_make_lexer; - PyObject *__pyx_n_s_match; - PyObject *__pyx_n_s_maxsplit; - PyObject *__pyx_n_s_metaclass; - PyObject *__pyx_n_s_mode; - PyObject *__pyx_n_s_module; - PyObject *__pyx_n_s_mro_entries; - PyObject *__pyx_n_u_name; - PyObject *__pyx_n_s_name_2; - PyObject *__pyx_n_s_next; - PyObject *__pyx_n_s_next_2; - PyObject *__pyx_n_s_next_3; - PyObject *__pyx_n_s_next_char; - PyObject *__pyx_n_s_object; - PyObject *__pyx_n_s_open; - PyObject *__pyx_n_s_os; - PyObject *__pyx_n_s_p; - PyObject *__pyx_n_s_path; - PyObject *__pyx_n_s_pop; - PyObject *__pyx_n_s_pos; - PyObject *__pyx_n_s_prepare; - PyObject *__pyx_n_s_qualname; - PyObject *__pyx_n_u_r; - PyObject *__pyx_n_s_re; - PyObject *__pyx_n_s_read; - PyObject *__pyx_n_u_read; - PyObject *__pyx_n_s_regexp; - PyObject *__pyx_kp_u_s; - PyObject *__pyx_kp_u_s_2; - PyObject *__pyx_n_s_scan_anonymous_block; - PyObject *__pyx_n_s_scan_over; - PyObject *__pyx_n_s_scan_until; - PyObject *__pyx_n_s_self; - PyObject *__pyx_n_s_set_name; - PyObject *__pyx_n_s_spec; - 
PyObject *__pyx_n_s_split; - PyObject *__pyx_n_s_start; - PyObject *__pyx_n_s_staticmethod; - PyObject *__pyx_n_s_stop_at; - PyObject *__pyx_n_s_string; - PyObject *__pyx_n_s_strip; - PyObject *__pyx_n_s_sub; - PyObject *__pyx_n_s_super; - PyObject *__pyx_n_s_tag; - PyObject *__pyx_n_s_test; - PyObject *__pyx_n_s_text; - PyObject *__pyx_n_s_text_2; - PyObject *__pyx_n_s_text_length; - PyObject *__pyx_n_s_token; - PyObject *__pyx_n_s_token_type; - PyObject *__pyx_kp_u_utf_8; - PyObject *__pyx_n_s_valid; - PyObject *__pyx_n_u_xX; - PyObject *__pyx_int_0; - PyObject *__pyx_int_1; - PyObject *__pyx_int_2; - PyObject *__pyx_int_8; - PyObject *__pyx_int_10; - PyObject *__pyx_int_16; - PyObject *__pyx_tuple__14; - PyObject *__pyx_tuple__15; - PyObject *__pyx_tuple__21; - PyObject *__pyx_tuple__23; - PyObject *__pyx_tuple__26; - PyObject *__pyx_tuple__28; - PyObject *__pyx_tuple__30; - PyObject *__pyx_tuple__32; - PyObject *__pyx_tuple__34; - PyObject *__pyx_tuple__36; - PyObject *__pyx_tuple__38; - PyObject *__pyx_tuple__39; - PyObject *__pyx_tuple__40; - PyObject *__pyx_tuple__44; - PyObject *__pyx_tuple__46; - PyObject *__pyx_tuple__48; - PyObject *__pyx_codeobj__22; - PyObject *__pyx_codeobj__24; - PyObject *__pyx_codeobj__25; - PyObject *__pyx_codeobj__27; - PyObject *__pyx_codeobj__29; - PyObject *__pyx_codeobj__31; - PyObject *__pyx_codeobj__33; - PyObject *__pyx_codeobj__35; - PyObject *__pyx_codeobj__37; - PyObject *__pyx_codeobj__41; - PyObject *__pyx_codeobj__42; - PyObject *__pyx_codeobj__43; - PyObject *__pyx_codeobj__45; - PyObject *__pyx_codeobj__47; - PyObject *__pyx_codeobj__49; - PyObject *__pyx_codeobj__50; -} __pyx_mstate; - -#if CYTHON_USE_MODULE_STATE -#ifdef __cplusplus -namespace { - extern struct PyModuleDef __pyx_moduledef; -} /* anonymous namespace */ -#else -static struct PyModuleDef __pyx_moduledef; -#endif - -#define __pyx_mstate(o) ((__pyx_mstate *)__Pyx_PyModule_GetState(o)) - -#define __pyx_mstate_global (__pyx_mstate(PyState_FindModule(&__pyx_moduledef))) - -#define __pyx_m (PyState_FindModule(&__pyx_moduledef)) -#else -static __pyx_mstate __pyx_mstate_global_static = -#ifdef __cplusplus - {}; -#else - {0}; -#endif -static __pyx_mstate *__pyx_mstate_global = &__pyx_mstate_global_static; -#endif -/* #### Code section: module_state_clear ### */ -#if CYTHON_USE_MODULE_STATE -static int __pyx_m_clear(PyObject *m) { - __pyx_mstate *clear_module_state = __pyx_mstate(m); - if (!clear_module_state) return 0; - Py_CLEAR(clear_module_state->__pyx_d); - Py_CLEAR(clear_module_state->__pyx_b); - Py_CLEAR(clear_module_state->__pyx_cython_runtime); - Py_CLEAR(clear_module_state->__pyx_empty_tuple); - Py_CLEAR(clear_module_state->__pyx_empty_bytes); - Py_CLEAR(clear_module_state->__pyx_empty_unicode); - #ifdef __Pyx_CyFunction_USED - Py_CLEAR(clear_module_state->__pyx_CyFunctionType); - #endif - #ifdef __Pyx_FusedFunction_USED - Py_CLEAR(clear_module_state->__pyx_FusedFunctionType); - #endif - Py_CLEAR(clear_module_state->__pyx_kp_u_); - Py_CLEAR(clear_module_state->__pyx_kp_u_0); - Py_CLEAR(clear_module_state->__pyx_kp_u_0123456789); - Py_CLEAR(clear_module_state->__pyx_kp_u_0123456789ABCDEFabcdef); - Py_CLEAR(clear_module_state->__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef); - Py_CLEAR(clear_module_state->__pyx_n_s_ANONYMOUS_BLOCK); - Py_CLEAR(clear_module_state->__pyx_n_u_ANONYMOUS_BLOCK); - Py_CLEAR(clear_module_state->__pyx_kp_s_A_Lexer_that_follows_include_sta); - Py_CLEAR(clear_module_state->__pyx_kp_u_A_Za_z_0_9); - Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_DIGIT); - 
Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_HEXDIGIT); - Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_LETTER); - Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_NAME_CONTINUATION); - Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_NAME_START); - Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_NEWLINE); - Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_SYMBOL); - Py_CLEAR(clear_module_state->__pyx_n_s_CHAR_WHITESPACE); - Py_CLEAR(clear_module_state->__pyx_n_s_CID); - Py_CLEAR(clear_module_state->__pyx_n_u_CID); - Py_CLEAR(clear_module_state->__pyx_n_s_COMMENT); - Py_CLEAR(clear_module_state->__pyx_n_u_COMMENT); - Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_after_file_name); - Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_before_file_name); - Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_file_name); - Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_glyph_class_name); - Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_s_to_terminate_anonymou); - Py_CLEAR(clear_module_state->__pyx_kp_u_Expected_to_terminate_string); - Py_CLEAR(clear_module_state->__pyx_n_s_FILENAME); - Py_CLEAR(clear_module_state->__pyx_n_u_FILENAME); - Py_CLEAR(clear_module_state->__pyx_n_s_FLOAT); - Py_CLEAR(clear_module_state->__pyx_n_u_FLOAT); - Py_CLEAR(clear_module_state->__pyx_n_s_FeatureLibError); - Py_CLEAR(clear_module_state->__pyx_n_s_FeatureLibLocation); - Py_CLEAR(clear_module_state->__pyx_n_s_FileNotFoundError); - Py_CLEAR(clear_module_state->__pyx_n_s_GLYPHCLASS); - Py_CLEAR(clear_module_state->__pyx_n_u_GLYPHCLASS); - Py_CLEAR(clear_module_state->__pyx_kp_u_Glyph_class_names_must_consist_o); - Py_CLEAR(clear_module_state->__pyx_kp_u_Glyph_class_names_must_not_be_lo); - Py_CLEAR(clear_module_state->__pyx_n_s_HEXADECIMAL); - Py_CLEAR(clear_module_state->__pyx_n_u_HEXADECIMAL); - Py_CLEAR(clear_module_state->__pyx_n_s_ImportError); - Py_CLEAR(clear_module_state->__pyx_n_s_IncludedFeaNotFound); - Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer); - Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer___init); - Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer___iter); - Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer___next); - Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer_make_lexer); - Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer_next); - Py_CLEAR(clear_module_state->__pyx_n_s_IncludingLexer_scan_anonymous_bl); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer___init); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer___iter); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer___next); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_location); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_next); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_next_2); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_scan_anonymous_block); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_scan_over); - Py_CLEAR(clear_module_state->__pyx_n_s_Lexer_scan_until); - Py_CLEAR(clear_module_state->__pyx_kp_s_Lexer_that_does_not_follow_inclu); - Py_CLEAR(clear_module_state->__pyx_kp_s_Lib_fontTools_feaLib_lexer_py); - Py_CLEAR(clear_module_state->__pyx_n_s_MODE_FILENAME); - Py_CLEAR(clear_module_state->__pyx_n_s_MODE_NORMAL); - Py_CLEAR(clear_module_state->__pyx_n_s_NAME); - Py_CLEAR(clear_module_state->__pyx_n_u_NAME); - Py_CLEAR(clear_module_state->__pyx_n_s_NEWLINE); - Py_CLEAR(clear_module_state->__pyx_n_u_NEWLINE); - Py_CLEAR(clear_module_state->__pyx_n_u_NORMAL); - Py_CLEAR(clear_module_state->__pyx_n_s_NUMBER); - Py_CLEAR(clear_module_state->__pyx_n_u_NUMBER); - 
Py_CLEAR(clear_module_state->__pyx_n_s_NUMBERS); - Py_CLEAR(clear_module_state->__pyx_n_s_NonIncludingLexer); - Py_CLEAR(clear_module_state->__pyx_n_s_NonIncludingLexer___next); - Py_CLEAR(clear_module_state->__pyx_n_s_OCTAL); - Py_CLEAR(clear_module_state->__pyx_n_u_OCTAL); - Py_CLEAR(clear_module_state->__pyx_n_s_RE_GLYPHCLASS); - Py_CLEAR(clear_module_state->__pyx_n_s_STRING); - Py_CLEAR(clear_module_state->__pyx_n_u_STRING); - Py_CLEAR(clear_module_state->__pyx_n_s_SYMBOL); - Py_CLEAR(clear_module_state->__pyx_n_u_SYMBOL); - Py_CLEAR(clear_module_state->__pyx_n_s_StopIteration); - Py_CLEAR(clear_module_state->__pyx_kp_u_Too_many_recursive_includes); - Py_CLEAR(clear_module_state->__pyx_kp_u_Unexpected_character_r); - Py_CLEAR(clear_module_state->__pyx_kp_u__10); - Py_CLEAR(clear_module_state->__pyx_kp_u__11); - Py_CLEAR(clear_module_state->__pyx_kp_u__12); - Py_CLEAR(clear_module_state->__pyx_n_s__13); - Py_CLEAR(clear_module_state->__pyx_kp_u__16); - Py_CLEAR(clear_module_state->__pyx_kp_u__17); - Py_CLEAR(clear_module_state->__pyx_kp_u__18); - Py_CLEAR(clear_module_state->__pyx_kp_u__19); - Py_CLEAR(clear_module_state->__pyx_kp_u__2); - Py_CLEAR(clear_module_state->__pyx_kp_u__20); - Py_CLEAR(clear_module_state->__pyx_kp_u__3); - Py_CLEAR(clear_module_state->__pyx_kp_u__4); - Py_CLEAR(clear_module_state->__pyx_kp_u__5); - Py_CLEAR(clear_module_state->__pyx_n_s__51); - Py_CLEAR(clear_module_state->__pyx_kp_u__6); - Py_CLEAR(clear_module_state->__pyx_kp_u__7); - Py_CLEAR(clear_module_state->__pyx_kp_u__8); - Py_CLEAR(clear_module_state->__pyx_kp_u__9); - Py_CLEAR(clear_module_state->__pyx_n_s_append); - Py_CLEAR(clear_module_state->__pyx_n_s_asyncio_coroutines); - Py_CLEAR(clear_module_state->__pyx_n_s_class_getitem); - Py_CLEAR(clear_module_state->__pyx_n_s_cline_in_traceback); - Py_CLEAR(clear_module_state->__pyx_n_s_close); - Py_CLEAR(clear_module_state->__pyx_n_s_closing); - Py_CLEAR(clear_module_state->__pyx_n_s_column); - Py_CLEAR(clear_module_state->__pyx_n_s_compile); - Py_CLEAR(clear_module_state->__pyx_n_s_cur_char); - Py_CLEAR(clear_module_state->__pyx_n_s_curpath); - Py_CLEAR(clear_module_state->__pyx_n_s_data); - Py_CLEAR(clear_module_state->__pyx_n_s_dict); - Py_CLEAR(clear_module_state->__pyx_n_s_dirname); - Py_CLEAR(clear_module_state->__pyx_n_s_doc); - Py_CLEAR(clear_module_state->__pyx_n_s_encoding); - Py_CLEAR(clear_module_state->__pyx_n_s_err); - Py_CLEAR(clear_module_state->__pyx_n_s_featurefile); - Py_CLEAR(clear_module_state->__pyx_n_s_featurefilepath); - Py_CLEAR(clear_module_state->__pyx_kp_u_features); - Py_CLEAR(clear_module_state->__pyx_n_s_file_or_path); - Py_CLEAR(clear_module_state->__pyx_n_s_filename); - Py_CLEAR(clear_module_state->__pyx_n_s_filename_2); - Py_CLEAR(clear_module_state->__pyx_n_s_fileobj); - Py_CLEAR(clear_module_state->__pyx_n_s_fname_location); - Py_CLEAR(clear_module_state->__pyx_n_s_fname_token); - Py_CLEAR(clear_module_state->__pyx_n_s_fname_type); - Py_CLEAR(clear_module_state->__pyx_n_s_fontTools_feaLib_error); - Py_CLEAR(clear_module_state->__pyx_n_s_fontTools_feaLib_lexer); - Py_CLEAR(clear_module_state->__pyx_n_s_fontTools_feaLib_location); - Py_CLEAR(clear_module_state->__pyx_n_s_getcwd); - Py_CLEAR(clear_module_state->__pyx_n_s_glyphclass); - Py_CLEAR(clear_module_state->__pyx_n_s_import); - Py_CLEAR(clear_module_state->__pyx_n_u_include); - Py_CLEAR(clear_module_state->__pyx_n_s_includeDir); - Py_CLEAR(clear_module_state->__pyx_n_s_init); - Py_CLEAR(clear_module_state->__pyx_n_s_init_subclass); - 
Py_CLEAR(clear_module_state->__pyx_n_s_initializing); - Py_CLEAR(clear_module_state->__pyx_n_s_is_coroutine); - Py_CLEAR(clear_module_state->__pyx_n_s_isabs); - Py_CLEAR(clear_module_state->__pyx_n_s_iter); - Py_CLEAR(clear_module_state->__pyx_n_s_join); - Py_CLEAR(clear_module_state->__pyx_n_s_lexer); - Py_CLEAR(clear_module_state->__pyx_n_s_lexers); - Py_CLEAR(clear_module_state->__pyx_n_s_limit); - Py_CLEAR(clear_module_state->__pyx_n_s_line); - Py_CLEAR(clear_module_state->__pyx_n_s_line_start); - Py_CLEAR(clear_module_state->__pyx_n_s_location); - Py_CLEAR(clear_module_state->__pyx_n_s_location_2); - Py_CLEAR(clear_module_state->__pyx_n_s_main); - Py_CLEAR(clear_module_state->__pyx_n_s_make_lexer); - Py_CLEAR(clear_module_state->__pyx_n_s_match); - Py_CLEAR(clear_module_state->__pyx_n_s_maxsplit); - Py_CLEAR(clear_module_state->__pyx_n_s_metaclass); - Py_CLEAR(clear_module_state->__pyx_n_s_mode); - Py_CLEAR(clear_module_state->__pyx_n_s_module); - Py_CLEAR(clear_module_state->__pyx_n_s_mro_entries); - Py_CLEAR(clear_module_state->__pyx_n_u_name); - Py_CLEAR(clear_module_state->__pyx_n_s_name_2); - Py_CLEAR(clear_module_state->__pyx_n_s_next); - Py_CLEAR(clear_module_state->__pyx_n_s_next_2); - Py_CLEAR(clear_module_state->__pyx_n_s_next_3); - Py_CLEAR(clear_module_state->__pyx_n_s_next_char); - Py_CLEAR(clear_module_state->__pyx_n_s_object); - Py_CLEAR(clear_module_state->__pyx_n_s_open); - Py_CLEAR(clear_module_state->__pyx_n_s_os); - Py_CLEAR(clear_module_state->__pyx_n_s_p); - Py_CLEAR(clear_module_state->__pyx_n_s_path); - Py_CLEAR(clear_module_state->__pyx_n_s_pop); - Py_CLEAR(clear_module_state->__pyx_n_s_pos); - Py_CLEAR(clear_module_state->__pyx_n_s_prepare); - Py_CLEAR(clear_module_state->__pyx_n_s_qualname); - Py_CLEAR(clear_module_state->__pyx_n_u_r); - Py_CLEAR(clear_module_state->__pyx_n_s_re); - Py_CLEAR(clear_module_state->__pyx_n_s_read); - Py_CLEAR(clear_module_state->__pyx_n_u_read); - Py_CLEAR(clear_module_state->__pyx_n_s_regexp); - Py_CLEAR(clear_module_state->__pyx_kp_u_s); - Py_CLEAR(clear_module_state->__pyx_kp_u_s_2); - Py_CLEAR(clear_module_state->__pyx_n_s_scan_anonymous_block); - Py_CLEAR(clear_module_state->__pyx_n_s_scan_over); - Py_CLEAR(clear_module_state->__pyx_n_s_scan_until); - Py_CLEAR(clear_module_state->__pyx_n_s_self); - Py_CLEAR(clear_module_state->__pyx_n_s_set_name); - Py_CLEAR(clear_module_state->__pyx_n_s_spec); - Py_CLEAR(clear_module_state->__pyx_n_s_split); - Py_CLEAR(clear_module_state->__pyx_n_s_start); - Py_CLEAR(clear_module_state->__pyx_n_s_staticmethod); - Py_CLEAR(clear_module_state->__pyx_n_s_stop_at); - Py_CLEAR(clear_module_state->__pyx_n_s_string); - Py_CLEAR(clear_module_state->__pyx_n_s_strip); - Py_CLEAR(clear_module_state->__pyx_n_s_sub); - Py_CLEAR(clear_module_state->__pyx_n_s_super); - Py_CLEAR(clear_module_state->__pyx_n_s_tag); - Py_CLEAR(clear_module_state->__pyx_n_s_test); - Py_CLEAR(clear_module_state->__pyx_n_s_text); - Py_CLEAR(clear_module_state->__pyx_n_s_text_2); - Py_CLEAR(clear_module_state->__pyx_n_s_text_length); - Py_CLEAR(clear_module_state->__pyx_n_s_token); - Py_CLEAR(clear_module_state->__pyx_n_s_token_type); - Py_CLEAR(clear_module_state->__pyx_kp_u_utf_8); - Py_CLEAR(clear_module_state->__pyx_n_s_valid); - Py_CLEAR(clear_module_state->__pyx_n_u_xX); - Py_CLEAR(clear_module_state->__pyx_int_0); - Py_CLEAR(clear_module_state->__pyx_int_1); - Py_CLEAR(clear_module_state->__pyx_int_2); - Py_CLEAR(clear_module_state->__pyx_int_8); - Py_CLEAR(clear_module_state->__pyx_int_10); - 
Py_CLEAR(clear_module_state->__pyx_int_16); - Py_CLEAR(clear_module_state->__pyx_tuple__14); - Py_CLEAR(clear_module_state->__pyx_tuple__15); - Py_CLEAR(clear_module_state->__pyx_tuple__21); - Py_CLEAR(clear_module_state->__pyx_tuple__23); - Py_CLEAR(clear_module_state->__pyx_tuple__26); - Py_CLEAR(clear_module_state->__pyx_tuple__28); - Py_CLEAR(clear_module_state->__pyx_tuple__30); - Py_CLEAR(clear_module_state->__pyx_tuple__32); - Py_CLEAR(clear_module_state->__pyx_tuple__34); - Py_CLEAR(clear_module_state->__pyx_tuple__36); - Py_CLEAR(clear_module_state->__pyx_tuple__38); - Py_CLEAR(clear_module_state->__pyx_tuple__39); - Py_CLEAR(clear_module_state->__pyx_tuple__40); - Py_CLEAR(clear_module_state->__pyx_tuple__44); - Py_CLEAR(clear_module_state->__pyx_tuple__46); - Py_CLEAR(clear_module_state->__pyx_tuple__48); - Py_CLEAR(clear_module_state->__pyx_codeobj__22); - Py_CLEAR(clear_module_state->__pyx_codeobj__24); - Py_CLEAR(clear_module_state->__pyx_codeobj__25); - Py_CLEAR(clear_module_state->__pyx_codeobj__27); - Py_CLEAR(clear_module_state->__pyx_codeobj__29); - Py_CLEAR(clear_module_state->__pyx_codeobj__31); - Py_CLEAR(clear_module_state->__pyx_codeobj__33); - Py_CLEAR(clear_module_state->__pyx_codeobj__35); - Py_CLEAR(clear_module_state->__pyx_codeobj__37); - Py_CLEAR(clear_module_state->__pyx_codeobj__41); - Py_CLEAR(clear_module_state->__pyx_codeobj__42); - Py_CLEAR(clear_module_state->__pyx_codeobj__43); - Py_CLEAR(clear_module_state->__pyx_codeobj__45); - Py_CLEAR(clear_module_state->__pyx_codeobj__47); - Py_CLEAR(clear_module_state->__pyx_codeobj__49); - Py_CLEAR(clear_module_state->__pyx_codeobj__50); - return 0; -} -#endif -/* #### Code section: module_state_traverse ### */ -#if CYTHON_USE_MODULE_STATE -static int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) { - __pyx_mstate *traverse_module_state = __pyx_mstate(m); - if (!traverse_module_state) return 0; - Py_VISIT(traverse_module_state->__pyx_d); - Py_VISIT(traverse_module_state->__pyx_b); - Py_VISIT(traverse_module_state->__pyx_cython_runtime); - Py_VISIT(traverse_module_state->__pyx_empty_tuple); - Py_VISIT(traverse_module_state->__pyx_empty_bytes); - Py_VISIT(traverse_module_state->__pyx_empty_unicode); - #ifdef __Pyx_CyFunction_USED - Py_VISIT(traverse_module_state->__pyx_CyFunctionType); - #endif - #ifdef __Pyx_FusedFunction_USED - Py_VISIT(traverse_module_state->__pyx_FusedFunctionType); - #endif - Py_VISIT(traverse_module_state->__pyx_kp_u_); - Py_VISIT(traverse_module_state->__pyx_kp_u_0); - Py_VISIT(traverse_module_state->__pyx_kp_u_0123456789); - Py_VISIT(traverse_module_state->__pyx_kp_u_0123456789ABCDEFabcdef); - Py_VISIT(traverse_module_state->__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef); - Py_VISIT(traverse_module_state->__pyx_n_s_ANONYMOUS_BLOCK); - Py_VISIT(traverse_module_state->__pyx_n_u_ANONYMOUS_BLOCK); - Py_VISIT(traverse_module_state->__pyx_kp_s_A_Lexer_that_follows_include_sta); - Py_VISIT(traverse_module_state->__pyx_kp_u_A_Za_z_0_9); - Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_DIGIT); - Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_HEXDIGIT); - Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_LETTER); - Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_NAME_CONTINUATION); - Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_NAME_START); - Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_NEWLINE); - Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_SYMBOL); - Py_VISIT(traverse_module_state->__pyx_n_s_CHAR_WHITESPACE); - Py_VISIT(traverse_module_state->__pyx_n_s_CID); - 
Py_VISIT(traverse_module_state->__pyx_n_u_CID); - Py_VISIT(traverse_module_state->__pyx_n_s_COMMENT); - Py_VISIT(traverse_module_state->__pyx_n_u_COMMENT); - Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_after_file_name); - Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_before_file_name); - Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_file_name); - Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_glyph_class_name); - Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_s_to_terminate_anonymou); - Py_VISIT(traverse_module_state->__pyx_kp_u_Expected_to_terminate_string); - Py_VISIT(traverse_module_state->__pyx_n_s_FILENAME); - Py_VISIT(traverse_module_state->__pyx_n_u_FILENAME); - Py_VISIT(traverse_module_state->__pyx_n_s_FLOAT); - Py_VISIT(traverse_module_state->__pyx_n_u_FLOAT); - Py_VISIT(traverse_module_state->__pyx_n_s_FeatureLibError); - Py_VISIT(traverse_module_state->__pyx_n_s_FeatureLibLocation); - Py_VISIT(traverse_module_state->__pyx_n_s_FileNotFoundError); - Py_VISIT(traverse_module_state->__pyx_n_s_GLYPHCLASS); - Py_VISIT(traverse_module_state->__pyx_n_u_GLYPHCLASS); - Py_VISIT(traverse_module_state->__pyx_kp_u_Glyph_class_names_must_consist_o); - Py_VISIT(traverse_module_state->__pyx_kp_u_Glyph_class_names_must_not_be_lo); - Py_VISIT(traverse_module_state->__pyx_n_s_HEXADECIMAL); - Py_VISIT(traverse_module_state->__pyx_n_u_HEXADECIMAL); - Py_VISIT(traverse_module_state->__pyx_n_s_ImportError); - Py_VISIT(traverse_module_state->__pyx_n_s_IncludedFeaNotFound); - Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer); - Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer___init); - Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer___iter); - Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer___next); - Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer_make_lexer); - Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer_next); - Py_VISIT(traverse_module_state->__pyx_n_s_IncludingLexer_scan_anonymous_bl); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer___init); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer___iter); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer___next); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_location); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_next); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_next_2); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_scan_anonymous_block); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_scan_over); - Py_VISIT(traverse_module_state->__pyx_n_s_Lexer_scan_until); - Py_VISIT(traverse_module_state->__pyx_kp_s_Lexer_that_does_not_follow_inclu); - Py_VISIT(traverse_module_state->__pyx_kp_s_Lib_fontTools_feaLib_lexer_py); - Py_VISIT(traverse_module_state->__pyx_n_s_MODE_FILENAME); - Py_VISIT(traverse_module_state->__pyx_n_s_MODE_NORMAL); - Py_VISIT(traverse_module_state->__pyx_n_s_NAME); - Py_VISIT(traverse_module_state->__pyx_n_u_NAME); - Py_VISIT(traverse_module_state->__pyx_n_s_NEWLINE); - Py_VISIT(traverse_module_state->__pyx_n_u_NEWLINE); - Py_VISIT(traverse_module_state->__pyx_n_u_NORMAL); - Py_VISIT(traverse_module_state->__pyx_n_s_NUMBER); - Py_VISIT(traverse_module_state->__pyx_n_u_NUMBER); - Py_VISIT(traverse_module_state->__pyx_n_s_NUMBERS); - Py_VISIT(traverse_module_state->__pyx_n_s_NonIncludingLexer); - Py_VISIT(traverse_module_state->__pyx_n_s_NonIncludingLexer___next); - Py_VISIT(traverse_module_state->__pyx_n_s_OCTAL); - Py_VISIT(traverse_module_state->__pyx_n_u_OCTAL); - 
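/* Every member visited in this run is an interned constant: the __pyx_n_s_
- * and __pyx_n_u_ slots hold identifier names, while the __pyx_kp_ slots hold
- * non-identifier string literals such as the lexer's error messages. */
- 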
Py_VISIT(traverse_module_state->__pyx_n_s_RE_GLYPHCLASS); - Py_VISIT(traverse_module_state->__pyx_n_s_STRING); - Py_VISIT(traverse_module_state->__pyx_n_u_STRING); - Py_VISIT(traverse_module_state->__pyx_n_s_SYMBOL); - Py_VISIT(traverse_module_state->__pyx_n_u_SYMBOL); - Py_VISIT(traverse_module_state->__pyx_n_s_StopIteration); - Py_VISIT(traverse_module_state->__pyx_kp_u_Too_many_recursive_includes); - Py_VISIT(traverse_module_state->__pyx_kp_u_Unexpected_character_r); - Py_VISIT(traverse_module_state->__pyx_kp_u__10); - Py_VISIT(traverse_module_state->__pyx_kp_u__11); - Py_VISIT(traverse_module_state->__pyx_kp_u__12); - Py_VISIT(traverse_module_state->__pyx_n_s__13); - Py_VISIT(traverse_module_state->__pyx_kp_u__16); - Py_VISIT(traverse_module_state->__pyx_kp_u__17); - Py_VISIT(traverse_module_state->__pyx_kp_u__18); - Py_VISIT(traverse_module_state->__pyx_kp_u__19); - Py_VISIT(traverse_module_state->__pyx_kp_u__2); - Py_VISIT(traverse_module_state->__pyx_kp_u__20); - Py_VISIT(traverse_module_state->__pyx_kp_u__3); - Py_VISIT(traverse_module_state->__pyx_kp_u__4); - Py_VISIT(traverse_module_state->__pyx_kp_u__5); - Py_VISIT(traverse_module_state->__pyx_n_s__51); - Py_VISIT(traverse_module_state->__pyx_kp_u__6); - Py_VISIT(traverse_module_state->__pyx_kp_u__7); - Py_VISIT(traverse_module_state->__pyx_kp_u__8); - Py_VISIT(traverse_module_state->__pyx_kp_u__9); - Py_VISIT(traverse_module_state->__pyx_n_s_append); - Py_VISIT(traverse_module_state->__pyx_n_s_asyncio_coroutines); - Py_VISIT(traverse_module_state->__pyx_n_s_class_getitem); - Py_VISIT(traverse_module_state->__pyx_n_s_cline_in_traceback); - Py_VISIT(traverse_module_state->__pyx_n_s_close); - Py_VISIT(traverse_module_state->__pyx_n_s_closing); - Py_VISIT(traverse_module_state->__pyx_n_s_column); - Py_VISIT(traverse_module_state->__pyx_n_s_compile); - Py_VISIT(traverse_module_state->__pyx_n_s_cur_char); - Py_VISIT(traverse_module_state->__pyx_n_s_curpath); - Py_VISIT(traverse_module_state->__pyx_n_s_data); - Py_VISIT(traverse_module_state->__pyx_n_s_dict); - Py_VISIT(traverse_module_state->__pyx_n_s_dirname); - Py_VISIT(traverse_module_state->__pyx_n_s_doc); - Py_VISIT(traverse_module_state->__pyx_n_s_encoding); - Py_VISIT(traverse_module_state->__pyx_n_s_err); - Py_VISIT(traverse_module_state->__pyx_n_s_featurefile); - Py_VISIT(traverse_module_state->__pyx_n_s_featurefilepath); - Py_VISIT(traverse_module_state->__pyx_kp_u_features); - Py_VISIT(traverse_module_state->__pyx_n_s_file_or_path); - Py_VISIT(traverse_module_state->__pyx_n_s_filename); - Py_VISIT(traverse_module_state->__pyx_n_s_filename_2); - Py_VISIT(traverse_module_state->__pyx_n_s_fileobj); - Py_VISIT(traverse_module_state->__pyx_n_s_fname_location); - Py_VISIT(traverse_module_state->__pyx_n_s_fname_token); - Py_VISIT(traverse_module_state->__pyx_n_s_fname_type); - Py_VISIT(traverse_module_state->__pyx_n_s_fontTools_feaLib_error); - Py_VISIT(traverse_module_state->__pyx_n_s_fontTools_feaLib_lexer); - Py_VISIT(traverse_module_state->__pyx_n_s_fontTools_feaLib_location); - Py_VISIT(traverse_module_state->__pyx_n_s_getcwd); - Py_VISIT(traverse_module_state->__pyx_n_s_glyphclass); - Py_VISIT(traverse_module_state->__pyx_n_s_import); - Py_VISIT(traverse_module_state->__pyx_n_u_include); - Py_VISIT(traverse_module_state->__pyx_n_s_includeDir); - Py_VISIT(traverse_module_state->__pyx_n_s_init); - Py_VISIT(traverse_module_state->__pyx_n_s_init_subclass); - Py_VISIT(traverse_module_state->__pyx_n_s_initializing); - Py_VISIT(traverse_module_state->__pyx_n_s_is_coroutine); - 
Py_VISIT(traverse_module_state->__pyx_n_s_isabs); - Py_VISIT(traverse_module_state->__pyx_n_s_iter); - Py_VISIT(traverse_module_state->__pyx_n_s_join); - Py_VISIT(traverse_module_state->__pyx_n_s_lexer); - Py_VISIT(traverse_module_state->__pyx_n_s_lexers); - Py_VISIT(traverse_module_state->__pyx_n_s_limit); - Py_VISIT(traverse_module_state->__pyx_n_s_line); - Py_VISIT(traverse_module_state->__pyx_n_s_line_start); - Py_VISIT(traverse_module_state->__pyx_n_s_location); - Py_VISIT(traverse_module_state->__pyx_n_s_location_2); - Py_VISIT(traverse_module_state->__pyx_n_s_main); - Py_VISIT(traverse_module_state->__pyx_n_s_make_lexer); - Py_VISIT(traverse_module_state->__pyx_n_s_match); - Py_VISIT(traverse_module_state->__pyx_n_s_maxsplit); - Py_VISIT(traverse_module_state->__pyx_n_s_metaclass); - Py_VISIT(traverse_module_state->__pyx_n_s_mode); - Py_VISIT(traverse_module_state->__pyx_n_s_module); - Py_VISIT(traverse_module_state->__pyx_n_s_mro_entries); - Py_VISIT(traverse_module_state->__pyx_n_u_name); - Py_VISIT(traverse_module_state->__pyx_n_s_name_2); - Py_VISIT(traverse_module_state->__pyx_n_s_next); - Py_VISIT(traverse_module_state->__pyx_n_s_next_2); - Py_VISIT(traverse_module_state->__pyx_n_s_next_3); - Py_VISIT(traverse_module_state->__pyx_n_s_next_char); - Py_VISIT(traverse_module_state->__pyx_n_s_object); - Py_VISIT(traverse_module_state->__pyx_n_s_open); - Py_VISIT(traverse_module_state->__pyx_n_s_os); - Py_VISIT(traverse_module_state->__pyx_n_s_p); - Py_VISIT(traverse_module_state->__pyx_n_s_path); - Py_VISIT(traverse_module_state->__pyx_n_s_pop); - Py_VISIT(traverse_module_state->__pyx_n_s_pos); - Py_VISIT(traverse_module_state->__pyx_n_s_prepare); - Py_VISIT(traverse_module_state->__pyx_n_s_qualname); - Py_VISIT(traverse_module_state->__pyx_n_u_r); - Py_VISIT(traverse_module_state->__pyx_n_s_re); - Py_VISIT(traverse_module_state->__pyx_n_s_read); - Py_VISIT(traverse_module_state->__pyx_n_u_read); - Py_VISIT(traverse_module_state->__pyx_n_s_regexp); - Py_VISIT(traverse_module_state->__pyx_kp_u_s); - Py_VISIT(traverse_module_state->__pyx_kp_u_s_2); - Py_VISIT(traverse_module_state->__pyx_n_s_scan_anonymous_block); - Py_VISIT(traverse_module_state->__pyx_n_s_scan_over); - Py_VISIT(traverse_module_state->__pyx_n_s_scan_until); - Py_VISIT(traverse_module_state->__pyx_n_s_self); - Py_VISIT(traverse_module_state->__pyx_n_s_set_name); - Py_VISIT(traverse_module_state->__pyx_n_s_spec); - Py_VISIT(traverse_module_state->__pyx_n_s_split); - Py_VISIT(traverse_module_state->__pyx_n_s_start); - Py_VISIT(traverse_module_state->__pyx_n_s_staticmethod); - Py_VISIT(traverse_module_state->__pyx_n_s_stop_at); - Py_VISIT(traverse_module_state->__pyx_n_s_string); - Py_VISIT(traverse_module_state->__pyx_n_s_strip); - Py_VISIT(traverse_module_state->__pyx_n_s_sub); - Py_VISIT(traverse_module_state->__pyx_n_s_super); - Py_VISIT(traverse_module_state->__pyx_n_s_tag); - Py_VISIT(traverse_module_state->__pyx_n_s_test); - Py_VISIT(traverse_module_state->__pyx_n_s_text); - Py_VISIT(traverse_module_state->__pyx_n_s_text_2); - Py_VISIT(traverse_module_state->__pyx_n_s_text_length); - Py_VISIT(traverse_module_state->__pyx_n_s_token); - Py_VISIT(traverse_module_state->__pyx_n_s_token_type); - Py_VISIT(traverse_module_state->__pyx_kp_u_utf_8); - Py_VISIT(traverse_module_state->__pyx_n_s_valid); - Py_VISIT(traverse_module_state->__pyx_n_u_xX); - Py_VISIT(traverse_module_state->__pyx_int_0); - Py_VISIT(traverse_module_state->__pyx_int_1); - Py_VISIT(traverse_module_state->__pyx_int_2); - 
Py_VISIT(traverse_module_state->__pyx_int_8); - Py_VISIT(traverse_module_state->__pyx_int_10); - Py_VISIT(traverse_module_state->__pyx_int_16); - Py_VISIT(traverse_module_state->__pyx_tuple__14); - Py_VISIT(traverse_module_state->__pyx_tuple__15); - Py_VISIT(traverse_module_state->__pyx_tuple__21); - Py_VISIT(traverse_module_state->__pyx_tuple__23); - Py_VISIT(traverse_module_state->__pyx_tuple__26); - Py_VISIT(traverse_module_state->__pyx_tuple__28); - Py_VISIT(traverse_module_state->__pyx_tuple__30); - Py_VISIT(traverse_module_state->__pyx_tuple__32); - Py_VISIT(traverse_module_state->__pyx_tuple__34); - Py_VISIT(traverse_module_state->__pyx_tuple__36); - Py_VISIT(traverse_module_state->__pyx_tuple__38); - Py_VISIT(traverse_module_state->__pyx_tuple__39); - Py_VISIT(traverse_module_state->__pyx_tuple__40); - Py_VISIT(traverse_module_state->__pyx_tuple__44); - Py_VISIT(traverse_module_state->__pyx_tuple__46); - Py_VISIT(traverse_module_state->__pyx_tuple__48); - Py_VISIT(traverse_module_state->__pyx_codeobj__22); - Py_VISIT(traverse_module_state->__pyx_codeobj__24); - Py_VISIT(traverse_module_state->__pyx_codeobj__25); - Py_VISIT(traverse_module_state->__pyx_codeobj__27); - Py_VISIT(traverse_module_state->__pyx_codeobj__29); - Py_VISIT(traverse_module_state->__pyx_codeobj__31); - Py_VISIT(traverse_module_state->__pyx_codeobj__33); - Py_VISIT(traverse_module_state->__pyx_codeobj__35); - Py_VISIT(traverse_module_state->__pyx_codeobj__37); - Py_VISIT(traverse_module_state->__pyx_codeobj__41); - Py_VISIT(traverse_module_state->__pyx_codeobj__42); - Py_VISIT(traverse_module_state->__pyx_codeobj__43); - Py_VISIT(traverse_module_state->__pyx_codeobj__45); - Py_VISIT(traverse_module_state->__pyx_codeobj__47); - Py_VISIT(traverse_module_state->__pyx_codeobj__49); - Py_VISIT(traverse_module_state->__pyx_codeobj__50); - return 0; -} -#endif -/* #### Code section: module_state_defines ### */ -#define __pyx_d __pyx_mstate_global->__pyx_d -#define __pyx_b __pyx_mstate_global->__pyx_b -#define __pyx_cython_runtime __pyx_mstate_global->__pyx_cython_runtime -#define __pyx_empty_tuple __pyx_mstate_global->__pyx_empty_tuple -#define __pyx_empty_bytes __pyx_mstate_global->__pyx_empty_bytes -#define __pyx_empty_unicode __pyx_mstate_global->__pyx_empty_unicode -#ifdef __Pyx_CyFunction_USED -#define __pyx_CyFunctionType __pyx_mstate_global->__pyx_CyFunctionType -#endif -#ifdef __Pyx_FusedFunction_USED -#define __pyx_FusedFunctionType __pyx_mstate_global->__pyx_FusedFunctionType -#endif -#ifdef __Pyx_Generator_USED -#define __pyx_GeneratorType __pyx_mstate_global->__pyx_GeneratorType -#endif -#ifdef __Pyx_IterableCoroutine_USED -#define __pyx_IterableCoroutineType __pyx_mstate_global->__pyx_IterableCoroutineType -#endif -#ifdef __Pyx_Coroutine_USED -#define __pyx_CoroutineAwaitType __pyx_mstate_global->__pyx_CoroutineAwaitType -#endif -#ifdef __Pyx_Coroutine_USED -#define __pyx_CoroutineType __pyx_mstate_global->__pyx_CoroutineType -#endif -#if CYTHON_USE_MODULE_STATE -#endif -#if CYTHON_USE_MODULE_STATE -#endif -#define __pyx_kp_u_ __pyx_mstate_global->__pyx_kp_u_ -#define __pyx_kp_u_0 __pyx_mstate_global->__pyx_kp_u_0 -#define __pyx_kp_u_0123456789 __pyx_mstate_global->__pyx_kp_u_0123456789 -#define __pyx_kp_u_0123456789ABCDEFabcdef __pyx_mstate_global->__pyx_kp_u_0123456789ABCDEFabcdef -#define __pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef __pyx_mstate_global->__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef -#define __pyx_n_s_ANONYMOUS_BLOCK __pyx_mstate_global->__pyx_n_s_ANONYMOUS_BLOCK -#define 
__pyx_n_u_ANONYMOUS_BLOCK __pyx_mstate_global->__pyx_n_u_ANONYMOUS_BLOCK -#define __pyx_kp_s_A_Lexer_that_follows_include_sta __pyx_mstate_global->__pyx_kp_s_A_Lexer_that_follows_include_sta -#define __pyx_kp_u_A_Za_z_0_9 __pyx_mstate_global->__pyx_kp_u_A_Za_z_0_9 -#define __pyx_n_s_CHAR_DIGIT __pyx_mstate_global->__pyx_n_s_CHAR_DIGIT -#define __pyx_n_s_CHAR_HEXDIGIT __pyx_mstate_global->__pyx_n_s_CHAR_HEXDIGIT -#define __pyx_n_s_CHAR_LETTER __pyx_mstate_global->__pyx_n_s_CHAR_LETTER -#define __pyx_n_s_CHAR_NAME_CONTINUATION __pyx_mstate_global->__pyx_n_s_CHAR_NAME_CONTINUATION -#define __pyx_n_s_CHAR_NAME_START __pyx_mstate_global->__pyx_n_s_CHAR_NAME_START -#define __pyx_n_s_CHAR_NEWLINE __pyx_mstate_global->__pyx_n_s_CHAR_NEWLINE -#define __pyx_n_s_CHAR_SYMBOL __pyx_mstate_global->__pyx_n_s_CHAR_SYMBOL -#define __pyx_n_s_CHAR_WHITESPACE __pyx_mstate_global->__pyx_n_s_CHAR_WHITESPACE -#define __pyx_n_s_CID __pyx_mstate_global->__pyx_n_s_CID -#define __pyx_n_u_CID __pyx_mstate_global->__pyx_n_u_CID -#define __pyx_n_s_COMMENT __pyx_mstate_global->__pyx_n_s_COMMENT -#define __pyx_n_u_COMMENT __pyx_mstate_global->__pyx_n_u_COMMENT -#define __pyx_kp_u_Expected_after_file_name __pyx_mstate_global->__pyx_kp_u_Expected_after_file_name -#define __pyx_kp_u_Expected_before_file_name __pyx_mstate_global->__pyx_kp_u_Expected_before_file_name -#define __pyx_kp_u_Expected_file_name __pyx_mstate_global->__pyx_kp_u_Expected_file_name -#define __pyx_kp_u_Expected_glyph_class_name __pyx_mstate_global->__pyx_kp_u_Expected_glyph_class_name -#define __pyx_kp_u_Expected_s_to_terminate_anonymou __pyx_mstate_global->__pyx_kp_u_Expected_s_to_terminate_anonymou -#define __pyx_kp_u_Expected_to_terminate_string __pyx_mstate_global->__pyx_kp_u_Expected_to_terminate_string -#define __pyx_n_s_FILENAME __pyx_mstate_global->__pyx_n_s_FILENAME -#define __pyx_n_u_FILENAME __pyx_mstate_global->__pyx_n_u_FILENAME -#define __pyx_n_s_FLOAT __pyx_mstate_global->__pyx_n_s_FLOAT -#define __pyx_n_u_FLOAT __pyx_mstate_global->__pyx_n_u_FLOAT -#define __pyx_n_s_FeatureLibError __pyx_mstate_global->__pyx_n_s_FeatureLibError -#define __pyx_n_s_FeatureLibLocation __pyx_mstate_global->__pyx_n_s_FeatureLibLocation -#define __pyx_n_s_FileNotFoundError __pyx_mstate_global->__pyx_n_s_FileNotFoundError -#define __pyx_n_s_GLYPHCLASS __pyx_mstate_global->__pyx_n_s_GLYPHCLASS -#define __pyx_n_u_GLYPHCLASS __pyx_mstate_global->__pyx_n_u_GLYPHCLASS -#define __pyx_kp_u_Glyph_class_names_must_consist_o __pyx_mstate_global->__pyx_kp_u_Glyph_class_names_must_consist_o -#define __pyx_kp_u_Glyph_class_names_must_not_be_lo __pyx_mstate_global->__pyx_kp_u_Glyph_class_names_must_not_be_lo -#define __pyx_n_s_HEXADECIMAL __pyx_mstate_global->__pyx_n_s_HEXADECIMAL -#define __pyx_n_u_HEXADECIMAL __pyx_mstate_global->__pyx_n_u_HEXADECIMAL -#define __pyx_n_s_ImportError __pyx_mstate_global->__pyx_n_s_ImportError -#define __pyx_n_s_IncludedFeaNotFound __pyx_mstate_global->__pyx_n_s_IncludedFeaNotFound -#define __pyx_n_s_IncludingLexer __pyx_mstate_global->__pyx_n_s_IncludingLexer -#define __pyx_n_s_IncludingLexer___init __pyx_mstate_global->__pyx_n_s_IncludingLexer___init -#define __pyx_n_s_IncludingLexer___iter __pyx_mstate_global->__pyx_n_s_IncludingLexer___iter -#define __pyx_n_s_IncludingLexer___next __pyx_mstate_global->__pyx_n_s_IncludingLexer___next -#define __pyx_n_s_IncludingLexer_make_lexer __pyx_mstate_global->__pyx_n_s_IncludingLexer_make_lexer -#define __pyx_n_s_IncludingLexer_next __pyx_mstate_global->__pyx_n_s_IncludingLexer_next -#define 
__pyx_n_s_IncludingLexer_scan_anonymous_bl __pyx_mstate_global->__pyx_n_s_IncludingLexer_scan_anonymous_bl -#define __pyx_n_s_Lexer __pyx_mstate_global->__pyx_n_s_Lexer -#define __pyx_n_s_Lexer___init __pyx_mstate_global->__pyx_n_s_Lexer___init -#define __pyx_n_s_Lexer___iter __pyx_mstate_global->__pyx_n_s_Lexer___iter -#define __pyx_n_s_Lexer___next __pyx_mstate_global->__pyx_n_s_Lexer___next -#define __pyx_n_s_Lexer_location __pyx_mstate_global->__pyx_n_s_Lexer_location -#define __pyx_n_s_Lexer_next __pyx_mstate_global->__pyx_n_s_Lexer_next -#define __pyx_n_s_Lexer_next_2 __pyx_mstate_global->__pyx_n_s_Lexer_next_2 -#define __pyx_n_s_Lexer_scan_anonymous_block __pyx_mstate_global->__pyx_n_s_Lexer_scan_anonymous_block -#define __pyx_n_s_Lexer_scan_over __pyx_mstate_global->__pyx_n_s_Lexer_scan_over -#define __pyx_n_s_Lexer_scan_until __pyx_mstate_global->__pyx_n_s_Lexer_scan_until -#define __pyx_kp_s_Lexer_that_does_not_follow_inclu __pyx_mstate_global->__pyx_kp_s_Lexer_that_does_not_follow_inclu -#define __pyx_kp_s_Lib_fontTools_feaLib_lexer_py __pyx_mstate_global->__pyx_kp_s_Lib_fontTools_feaLib_lexer_py -#define __pyx_n_s_MODE_FILENAME __pyx_mstate_global->__pyx_n_s_MODE_FILENAME -#define __pyx_n_s_MODE_NORMAL __pyx_mstate_global->__pyx_n_s_MODE_NORMAL -#define __pyx_n_s_NAME __pyx_mstate_global->__pyx_n_s_NAME -#define __pyx_n_u_NAME __pyx_mstate_global->__pyx_n_u_NAME -#define __pyx_n_s_NEWLINE __pyx_mstate_global->__pyx_n_s_NEWLINE -#define __pyx_n_u_NEWLINE __pyx_mstate_global->__pyx_n_u_NEWLINE -#define __pyx_n_u_NORMAL __pyx_mstate_global->__pyx_n_u_NORMAL -#define __pyx_n_s_NUMBER __pyx_mstate_global->__pyx_n_s_NUMBER -#define __pyx_n_u_NUMBER __pyx_mstate_global->__pyx_n_u_NUMBER -#define __pyx_n_s_NUMBERS __pyx_mstate_global->__pyx_n_s_NUMBERS -#define __pyx_n_s_NonIncludingLexer __pyx_mstate_global->__pyx_n_s_NonIncludingLexer -#define __pyx_n_s_NonIncludingLexer___next __pyx_mstate_global->__pyx_n_s_NonIncludingLexer___next -#define __pyx_n_s_OCTAL __pyx_mstate_global->__pyx_n_s_OCTAL -#define __pyx_n_u_OCTAL __pyx_mstate_global->__pyx_n_u_OCTAL -#define __pyx_n_s_RE_GLYPHCLASS __pyx_mstate_global->__pyx_n_s_RE_GLYPHCLASS -#define __pyx_n_s_STRING __pyx_mstate_global->__pyx_n_s_STRING -#define __pyx_n_u_STRING __pyx_mstate_global->__pyx_n_u_STRING -#define __pyx_n_s_SYMBOL __pyx_mstate_global->__pyx_n_s_SYMBOL -#define __pyx_n_u_SYMBOL __pyx_mstate_global->__pyx_n_u_SYMBOL -#define __pyx_n_s_StopIteration __pyx_mstate_global->__pyx_n_s_StopIteration -#define __pyx_kp_u_Too_many_recursive_includes __pyx_mstate_global->__pyx_kp_u_Too_many_recursive_includes -#define __pyx_kp_u_Unexpected_character_r __pyx_mstate_global->__pyx_kp_u_Unexpected_character_r -#define __pyx_kp_u__10 __pyx_mstate_global->__pyx_kp_u__10 -#define __pyx_kp_u__11 __pyx_mstate_global->__pyx_kp_u__11 -#define __pyx_kp_u__12 __pyx_mstate_global->__pyx_kp_u__12 -#define __pyx_n_s__13 __pyx_mstate_global->__pyx_n_s__13 -#define __pyx_kp_u__16 __pyx_mstate_global->__pyx_kp_u__16 -#define __pyx_kp_u__17 __pyx_mstate_global->__pyx_kp_u__17 -#define __pyx_kp_u__18 __pyx_mstate_global->__pyx_kp_u__18 -#define __pyx_kp_u__19 __pyx_mstate_global->__pyx_kp_u__19 -#define __pyx_kp_u__2 __pyx_mstate_global->__pyx_kp_u__2 -#define __pyx_kp_u__20 __pyx_mstate_global->__pyx_kp_u__20 -#define __pyx_kp_u__3 __pyx_mstate_global->__pyx_kp_u__3 -#define __pyx_kp_u__4 __pyx_mstate_global->__pyx_kp_u__4 -#define __pyx_kp_u__5 __pyx_mstate_global->__pyx_kp_u__5 -#define __pyx_n_s__51 __pyx_mstate_global->__pyx_n_s__51 -#define 
__pyx_kp_u__6 __pyx_mstate_global->__pyx_kp_u__6 -#define __pyx_kp_u__7 __pyx_mstate_global->__pyx_kp_u__7 -#define __pyx_kp_u__8 __pyx_mstate_global->__pyx_kp_u__8 -#define __pyx_kp_u__9 __pyx_mstate_global->__pyx_kp_u__9 -#define __pyx_n_s_append __pyx_mstate_global->__pyx_n_s_append -#define __pyx_n_s_asyncio_coroutines __pyx_mstate_global->__pyx_n_s_asyncio_coroutines -#define __pyx_n_s_class_getitem __pyx_mstate_global->__pyx_n_s_class_getitem -#define __pyx_n_s_cline_in_traceback __pyx_mstate_global->__pyx_n_s_cline_in_traceback -#define __pyx_n_s_close __pyx_mstate_global->__pyx_n_s_close -#define __pyx_n_s_closing __pyx_mstate_global->__pyx_n_s_closing -#define __pyx_n_s_column __pyx_mstate_global->__pyx_n_s_column -#define __pyx_n_s_compile __pyx_mstate_global->__pyx_n_s_compile -#define __pyx_n_s_cur_char __pyx_mstate_global->__pyx_n_s_cur_char -#define __pyx_n_s_curpath __pyx_mstate_global->__pyx_n_s_curpath -#define __pyx_n_s_data __pyx_mstate_global->__pyx_n_s_data -#define __pyx_n_s_dict __pyx_mstate_global->__pyx_n_s_dict -#define __pyx_n_s_dirname __pyx_mstate_global->__pyx_n_s_dirname -#define __pyx_n_s_doc __pyx_mstate_global->__pyx_n_s_doc -#define __pyx_n_s_encoding __pyx_mstate_global->__pyx_n_s_encoding -#define __pyx_n_s_err __pyx_mstate_global->__pyx_n_s_err -#define __pyx_n_s_featurefile __pyx_mstate_global->__pyx_n_s_featurefile -#define __pyx_n_s_featurefilepath __pyx_mstate_global->__pyx_n_s_featurefilepath -#define __pyx_kp_u_features __pyx_mstate_global->__pyx_kp_u_features -#define __pyx_n_s_file_or_path __pyx_mstate_global->__pyx_n_s_file_or_path -#define __pyx_n_s_filename __pyx_mstate_global->__pyx_n_s_filename -#define __pyx_n_s_filename_2 __pyx_mstate_global->__pyx_n_s_filename_2 -#define __pyx_n_s_fileobj __pyx_mstate_global->__pyx_n_s_fileobj -#define __pyx_n_s_fname_location __pyx_mstate_global->__pyx_n_s_fname_location -#define __pyx_n_s_fname_token __pyx_mstate_global->__pyx_n_s_fname_token -#define __pyx_n_s_fname_type __pyx_mstate_global->__pyx_n_s_fname_type -#define __pyx_n_s_fontTools_feaLib_error __pyx_mstate_global->__pyx_n_s_fontTools_feaLib_error -#define __pyx_n_s_fontTools_feaLib_lexer __pyx_mstate_global->__pyx_n_s_fontTools_feaLib_lexer -#define __pyx_n_s_fontTools_feaLib_location __pyx_mstate_global->__pyx_n_s_fontTools_feaLib_location -#define __pyx_n_s_getcwd __pyx_mstate_global->__pyx_n_s_getcwd -#define __pyx_n_s_glyphclass __pyx_mstate_global->__pyx_n_s_glyphclass -#define __pyx_n_s_import __pyx_mstate_global->__pyx_n_s_import -#define __pyx_n_u_include __pyx_mstate_global->__pyx_n_u_include -#define __pyx_n_s_includeDir __pyx_mstate_global->__pyx_n_s_includeDir -#define __pyx_n_s_init __pyx_mstate_global->__pyx_n_s_init -#define __pyx_n_s_init_subclass __pyx_mstate_global->__pyx_n_s_init_subclass -#define __pyx_n_s_initializing __pyx_mstate_global->__pyx_n_s_initializing -#define __pyx_n_s_is_coroutine __pyx_mstate_global->__pyx_n_s_is_coroutine -#define __pyx_n_s_isabs __pyx_mstate_global->__pyx_n_s_isabs -#define __pyx_n_s_iter __pyx_mstate_global->__pyx_n_s_iter -#define __pyx_n_s_join __pyx_mstate_global->__pyx_n_s_join -#define __pyx_n_s_lexer __pyx_mstate_global->__pyx_n_s_lexer -#define __pyx_n_s_lexers __pyx_mstate_global->__pyx_n_s_lexers -#define __pyx_n_s_limit __pyx_mstate_global->__pyx_n_s_limit -#define __pyx_n_s_line __pyx_mstate_global->__pyx_n_s_line -#define __pyx_n_s_line_start __pyx_mstate_global->__pyx_n_s_line_start -#define __pyx_n_s_location __pyx_mstate_global->__pyx_n_s_location -#define 
__pyx_n_s_location_2 __pyx_mstate_global->__pyx_n_s_location_2 -#define __pyx_n_s_main __pyx_mstate_global->__pyx_n_s_main -#define __pyx_n_s_make_lexer __pyx_mstate_global->__pyx_n_s_make_lexer -#define __pyx_n_s_match __pyx_mstate_global->__pyx_n_s_match -#define __pyx_n_s_maxsplit __pyx_mstate_global->__pyx_n_s_maxsplit -#define __pyx_n_s_metaclass __pyx_mstate_global->__pyx_n_s_metaclass -#define __pyx_n_s_mode __pyx_mstate_global->__pyx_n_s_mode -#define __pyx_n_s_module __pyx_mstate_global->__pyx_n_s_module -#define __pyx_n_s_mro_entries __pyx_mstate_global->__pyx_n_s_mro_entries -#define __pyx_n_u_name __pyx_mstate_global->__pyx_n_u_name -#define __pyx_n_s_name_2 __pyx_mstate_global->__pyx_n_s_name_2 -#define __pyx_n_s_next __pyx_mstate_global->__pyx_n_s_next -#define __pyx_n_s_next_2 __pyx_mstate_global->__pyx_n_s_next_2 -#define __pyx_n_s_next_3 __pyx_mstate_global->__pyx_n_s_next_3 -#define __pyx_n_s_next_char __pyx_mstate_global->__pyx_n_s_next_char -#define __pyx_n_s_object __pyx_mstate_global->__pyx_n_s_object -#define __pyx_n_s_open __pyx_mstate_global->__pyx_n_s_open -#define __pyx_n_s_os __pyx_mstate_global->__pyx_n_s_os -#define __pyx_n_s_p __pyx_mstate_global->__pyx_n_s_p -#define __pyx_n_s_path __pyx_mstate_global->__pyx_n_s_path -#define __pyx_n_s_pop __pyx_mstate_global->__pyx_n_s_pop -#define __pyx_n_s_pos __pyx_mstate_global->__pyx_n_s_pos -#define __pyx_n_s_prepare __pyx_mstate_global->__pyx_n_s_prepare -#define __pyx_n_s_qualname __pyx_mstate_global->__pyx_n_s_qualname -#define __pyx_n_u_r __pyx_mstate_global->__pyx_n_u_r -#define __pyx_n_s_re __pyx_mstate_global->__pyx_n_s_re -#define __pyx_n_s_read __pyx_mstate_global->__pyx_n_s_read -#define __pyx_n_u_read __pyx_mstate_global->__pyx_n_u_read -#define __pyx_n_s_regexp __pyx_mstate_global->__pyx_n_s_regexp -#define __pyx_kp_u_s __pyx_mstate_global->__pyx_kp_u_s -#define __pyx_kp_u_s_2 __pyx_mstate_global->__pyx_kp_u_s_2 -#define __pyx_n_s_scan_anonymous_block __pyx_mstate_global->__pyx_n_s_scan_anonymous_block -#define __pyx_n_s_scan_over __pyx_mstate_global->__pyx_n_s_scan_over -#define __pyx_n_s_scan_until __pyx_mstate_global->__pyx_n_s_scan_until -#define __pyx_n_s_self __pyx_mstate_global->__pyx_n_s_self -#define __pyx_n_s_set_name __pyx_mstate_global->__pyx_n_s_set_name -#define __pyx_n_s_spec __pyx_mstate_global->__pyx_n_s_spec -#define __pyx_n_s_split __pyx_mstate_global->__pyx_n_s_split -#define __pyx_n_s_start __pyx_mstate_global->__pyx_n_s_start -#define __pyx_n_s_staticmethod __pyx_mstate_global->__pyx_n_s_staticmethod -#define __pyx_n_s_stop_at __pyx_mstate_global->__pyx_n_s_stop_at -#define __pyx_n_s_string __pyx_mstate_global->__pyx_n_s_string -#define __pyx_n_s_strip __pyx_mstate_global->__pyx_n_s_strip -#define __pyx_n_s_sub __pyx_mstate_global->__pyx_n_s_sub -#define __pyx_n_s_super __pyx_mstate_global->__pyx_n_s_super -#define __pyx_n_s_tag __pyx_mstate_global->__pyx_n_s_tag -#define __pyx_n_s_test __pyx_mstate_global->__pyx_n_s_test -#define __pyx_n_s_text __pyx_mstate_global->__pyx_n_s_text -#define __pyx_n_s_text_2 __pyx_mstate_global->__pyx_n_s_text_2 -#define __pyx_n_s_text_length __pyx_mstate_global->__pyx_n_s_text_length -#define __pyx_n_s_token __pyx_mstate_global->__pyx_n_s_token -#define __pyx_n_s_token_type __pyx_mstate_global->__pyx_n_s_token_type -#define __pyx_kp_u_utf_8 __pyx_mstate_global->__pyx_kp_u_utf_8 -#define __pyx_n_s_valid __pyx_mstate_global->__pyx_n_s_valid -#define __pyx_n_u_xX __pyx_mstate_global->__pyx_n_u_xX -#define __pyx_int_0 __pyx_mstate_global->__pyx_int_0 
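-/* Each alias in this section follows the same pattern: a name that was once a
- * plain C global is redirected into the per-module state struct, so that every
- * copy of the module owns its own constants (the layout needed for PEP 489
- * multi-phase initialization). A minimal sketch of the technique, using
- * hypothetical names that are not part of this module:
- *
- *     typedef struct { PyObject *str_hello; } mstate;
- *     #define mstate(m)   ((mstate *)PyModule_GetState(m))
- *     #define str_hello   mstate_global->str_hello
- */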
-#define __pyx_int_1 __pyx_mstate_global->__pyx_int_1 -#define __pyx_int_2 __pyx_mstate_global->__pyx_int_2 -#define __pyx_int_8 __pyx_mstate_global->__pyx_int_8 -#define __pyx_int_10 __pyx_mstate_global->__pyx_int_10 -#define __pyx_int_16 __pyx_mstate_global->__pyx_int_16 -#define __pyx_tuple__14 __pyx_mstate_global->__pyx_tuple__14 -#define __pyx_tuple__15 __pyx_mstate_global->__pyx_tuple__15 -#define __pyx_tuple__21 __pyx_mstate_global->__pyx_tuple__21 -#define __pyx_tuple__23 __pyx_mstate_global->__pyx_tuple__23 -#define __pyx_tuple__26 __pyx_mstate_global->__pyx_tuple__26 -#define __pyx_tuple__28 __pyx_mstate_global->__pyx_tuple__28 -#define __pyx_tuple__30 __pyx_mstate_global->__pyx_tuple__30 -#define __pyx_tuple__32 __pyx_mstate_global->__pyx_tuple__32 -#define __pyx_tuple__34 __pyx_mstate_global->__pyx_tuple__34 -#define __pyx_tuple__36 __pyx_mstate_global->__pyx_tuple__36 -#define __pyx_tuple__38 __pyx_mstate_global->__pyx_tuple__38 -#define __pyx_tuple__39 __pyx_mstate_global->__pyx_tuple__39 -#define __pyx_tuple__40 __pyx_mstate_global->__pyx_tuple__40 -#define __pyx_tuple__44 __pyx_mstate_global->__pyx_tuple__44 -#define __pyx_tuple__46 __pyx_mstate_global->__pyx_tuple__46 -#define __pyx_tuple__48 __pyx_mstate_global->__pyx_tuple__48 -#define __pyx_codeobj__22 __pyx_mstate_global->__pyx_codeobj__22 -#define __pyx_codeobj__24 __pyx_mstate_global->__pyx_codeobj__24 -#define __pyx_codeobj__25 __pyx_mstate_global->__pyx_codeobj__25 -#define __pyx_codeobj__27 __pyx_mstate_global->__pyx_codeobj__27 -#define __pyx_codeobj__29 __pyx_mstate_global->__pyx_codeobj__29 -#define __pyx_codeobj__31 __pyx_mstate_global->__pyx_codeobj__31 -#define __pyx_codeobj__33 __pyx_mstate_global->__pyx_codeobj__33 -#define __pyx_codeobj__35 __pyx_mstate_global->__pyx_codeobj__35 -#define __pyx_codeobj__37 __pyx_mstate_global->__pyx_codeobj__37 -#define __pyx_codeobj__41 __pyx_mstate_global->__pyx_codeobj__41 -#define __pyx_codeobj__42 __pyx_mstate_global->__pyx_codeobj__42 -#define __pyx_codeobj__43 __pyx_mstate_global->__pyx_codeobj__43 -#define __pyx_codeobj__45 __pyx_mstate_global->__pyx_codeobj__45 -#define __pyx_codeobj__47 __pyx_mstate_global->__pyx_codeobj__47 -#define __pyx_codeobj__49 __pyx_mstate_global->__pyx_codeobj__49 -#define __pyx_codeobj__50 __pyx_mstate_global->__pyx_codeobj__50 -/* #### Code section: module_code ### */ - -/* "fontTools/feaLib/lexer.py":43 - * MODE_FILENAME_ = "FILENAME" - * - * def __init__(self, text, filename): # <<<<<<<<<<<<<< - * self.filename_ = filename - * self.line_ = 1 - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_1__init__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer___init__, "Lexer.__init__(self, text, filename)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_1__init__ = {"__init__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_1__init__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer___init__}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_1__init__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_text = 0; 
- PyObject *__pyx_v_filename = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_text,&__pyx_n_s_filename,0}; - PyObject* values[3] = {0,0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_text)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 1); __PYX_ERR(0, 43, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_filename)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 2); __PYX_ERR(0, 43, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__init__") < 0)) __PYX_ERR(0, 43, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 3)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - } - __pyx_v_self = values[0]; - __pyx_v_text = values[1]; - __pyx_v_filename = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 43, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer___init__(__pyx_self, __pyx_v_self, __pyx_v_text, __pyx_v_filename); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_text, PyObject *__pyx_v_filename) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__init__", 0); - - /* "fontTools/feaLib/lexer.py":44 - * 
- * def __init__(self, text, filename): - * self.filename_ = filename # <<<<<<<<<<<<<< - * self.line_ = 1 - * self.pos_ = 0 - */ - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_filename_2, __pyx_v_filename) < 0) __PYX_ERR(0, 44, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":45 - * def __init__(self, text, filename): - * self.filename_ = filename - * self.line_ = 1 # <<<<<<<<<<<<<< - * self.pos_ = 0 - * self.line_start_ = 0 - */ - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line, __pyx_int_1) < 0) __PYX_ERR(0, 45, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":46 - * self.filename_ = filename - * self.line_ = 1 - * self.pos_ = 0 # <<<<<<<<<<<<<< - * self.line_start_ = 0 - * self.text_ = text - */ - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_int_0) < 0) __PYX_ERR(0, 46, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":47 - * self.line_ = 1 - * self.pos_ = 0 - * self.line_start_ = 0 # <<<<<<<<<<<<<< - * self.text_ = text - * self.text_length_ = len(text) - */ - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line_start, __pyx_int_0) < 0) __PYX_ERR(0, 47, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":48 - * self.pos_ = 0 - * self.line_start_ = 0 - * self.text_ = text # <<<<<<<<<<<<<< - * self.text_length_ = len(text) - * self.mode_ = Lexer.MODE_NORMAL_ - */ - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_text_2, __pyx_v_text) < 0) __PYX_ERR(0, 48, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":49 - * self.line_start_ = 0 - * self.text_ = text - * self.text_length_ = len(text) # <<<<<<<<<<<<<< - * self.mode_ = Lexer.MODE_NORMAL_ - * - */ - __pyx_t_1 = PyObject_Length(__pyx_v_text); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 49, __pyx_L1_error) - __pyx_t_2 = PyInt_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_text_length, __pyx_t_2) < 0) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":50 - * self.text_ = text - * self.text_length_ = len(text) - * self.mode_ = Lexer.MODE_NORMAL_ # <<<<<<<<<<<<<< - * - * def __iter__(self): - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 50, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_MODE_NORMAL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 50, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_mode, __pyx_t_3) < 0) __PYX_ERR(0, 50, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":43 - * MODE_FILENAME_ = "FILENAME" - * - * def __init__(self, text, filename): # <<<<<<<<<<<<<< - * self.filename_ = filename - * self.line_ = 1 - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":52 - * self.mode_ = Lexer.MODE_NORMAL_ - * - * def __iter__(self): # <<<<<<<<<<<<<< - * return self - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_3__iter__(PyObject *__pyx_self, 
-#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_2__iter__, "Lexer.__iter__(self)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_3__iter__ = {"__iter__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_3__iter__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_2__iter__}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_3__iter__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__iter__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 52, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__iter__") < 0)) __PYX_ERR(0, 52, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_self = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__iter__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 52, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__iter__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_2__iter__(__pyx_self, __pyx_v_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_2__iter__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__iter__", 0); - - /* "fontTools/feaLib/lexer.py":53 - * - * def __iter__(self): - * return self # <<<<<<<<<<<<<< - * - * def next(self): # Python 2 - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self); - __pyx_r = __pyx_v_self; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":52 - * self.mode_ = Lexer.MODE_NORMAL_ - * - * def __iter__(self): # <<<<<<<<<<<<<< - * return self - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - 
__Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":55 - * return self - * - * def next(self): # Python 2 # <<<<<<<<<<<<<< - * return self.__next__() - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_5next(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_4next, "Lexer.next(self)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_5next = {"next", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_5next, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_4next}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_5next(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("next (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 55, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "next") < 0)) __PYX_ERR(0, 55, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_self = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("next", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 55, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.next", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_4next(__pyx_self, __pyx_v_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_4next(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("next", 0); - - /* "fontTools/feaLib/lexer.py":56 
- * - * def next(self): # Python 2 - * return self.__next__() # <<<<<<<<<<<<<< - * - * def __next__(self): # Python 3 - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_next); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 56, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 56, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":55 - * return self - * - * def next(self): # Python 2 # <<<<<<<<<<<<<< - * return self.__next__() - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.next", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":58 - * return self.__next__() - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * while True: - * token_type, token, location = self.next_() - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_7__next__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_6__next__, "Lexer.__next__(self)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_7__next__ = {"__next__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_7__next__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_6__next__}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_7__next__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__next__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, 
__pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__next__") < 0)) __PYX_ERR(0, 58, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_self = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__next__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 58, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_6__next__(__pyx_self, __pyx_v_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_6__next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_v_token_type = NULL; - PyObject *__pyx_v_token = NULL; - PyObject *__pyx_v_location = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *(*__pyx_t_7)(PyObject *); - int __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__next__", 0); - - /* "fontTools/feaLib/lexer.py":59 - * - * def __next__(self): # Python 3 - * while True: # <<<<<<<<<<<<<< - * token_type, token, location = self.next_() - * if token_type != Lexer.NEWLINE: - */ - while (1) { - - /* "fontTools/feaLib/lexer.py":60 - * def __next__(self): # Python 3 - * while True: - * token_type, token, location = self.next_() # <<<<<<<<<<<<<< - * if token_type != Lexer.NEWLINE: - * return (token_type, token, location) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_next_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { - PyObject* sequence = __pyx_t_1; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 3)) { - if (size > 3) __Pyx_RaiseTooManyValuesError(3); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 60, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); 
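-      /* Exact tuple/list fast path: the three items of the (token_type, token,
-       * location) result are fetched as borrowed references and promoted to
-       * owned references with __Pyx_INCREF; the #else branch and the generic
-       * iterator fallback below handle any other sequence that self.next_()
-       * might return. */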
- __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 2); - } else { - __pyx_t_2 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); - __pyx_t_5 = PyList_GET_ITEM(sequence, 2); - } - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_5); - #else - __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_6 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_6); - index = 0; __pyx_t_2 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_2)) goto __pyx_L5_unpacking_failed; - __Pyx_GOTREF(__pyx_t_2); - index = 1; __pyx_t_3 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - index = 2; __pyx_t_5 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_5)) goto __pyx_L5_unpacking_failed; - __Pyx_GOTREF(__pyx_t_5); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_7(__pyx_t_6), 3) < 0) __PYX_ERR(0, 60, __pyx_L1_error) - __pyx_t_7 = NULL; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - goto __pyx_L6_unpacking_done; - __pyx_L5_unpacking_failed:; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_7 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 60, __pyx_L1_error) - __pyx_L6_unpacking_done:; - } - __Pyx_XDECREF_SET(__pyx_v_token_type, __pyx_t_2); - __pyx_t_2 = 0; - __Pyx_XDECREF_SET(__pyx_v_token, __pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF_SET(__pyx_v_location, __pyx_t_5); - __pyx_t_5 = 0; - - /* "fontTools/feaLib/lexer.py":61 - * while True: - * token_type, token, location = self.next_() - * if token_type != Lexer.NEWLINE: # <<<<<<<<<<<<<< - * return (token_type, token, location) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_NEWLINE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_RichCompare(__pyx_v_token_type, __pyx_t_5, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 61, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":62 - * token_type, token, location = self.next_() - * if token_type != Lexer.NEWLINE: - * return (token_type, token, location) # <<<<<<<<<<<<<< - * - * def location_(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_token_type); - __Pyx_GIVEREF(__pyx_v_token_type); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_token_type); - __Pyx_INCREF(__pyx_v_token); - __Pyx_GIVEREF(__pyx_v_token); - PyTuple_SET_ITEM(__pyx_t_1, 1, 
__pyx_v_token); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_location); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":61 - * while True: - * token_type, token, location = self.next_() - * if token_type != Lexer.NEWLINE: # <<<<<<<<<<<<<< - * return (token_type, token, location) - * - */ - } - } - - /* "fontTools/feaLib/lexer.py":58 - * return self.__next__() - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * while True: - * token_type, token, location = self.next_() - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_token_type); - __Pyx_XDECREF(__pyx_v_token); - __Pyx_XDECREF(__pyx_v_location); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":64 - * return (token_type, token, location) - * - * def location_(self): # <<<<<<<<<<<<<< - * column = self.pos_ - self.line_start_ + 1 - * return FeatureLibLocation(self.filename_ or "<features>", self.line_, column) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_9location_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_8location_, "Lexer.location_(self)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_9location_ = {"location_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_9location_, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_8location_}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_9location_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("location_ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 64, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, 
__pyx_pyargnames, 0, values + 0, kwd_pos_args, "location_") < 0)) __PYX_ERR(0, 64, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_self = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("location_", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 64, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.location_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_8location_(__pyx_self, __pyx_v_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_8location_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_v_column = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("location_", 0); - - /* "fontTools/feaLib/lexer.py":65 - * - * def location_(self): - * column = self.pos_ - self.line_start_ + 1 # <<<<<<<<<<<<<< - * return FeatureLibLocation(self.filename_ or "", self.line_, column) - * - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_line_start); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Subtract(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_column = __pyx_t_2; - __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":66 - * def location_(self): - * column = self.pos_ - self.line_start_ + 1 - * return FeatureLibLocation(self.filename_ or "", self.line_, column) # <<<<<<<<<<<<<< - * - * def next_(self): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibLocation); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 66, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_filename_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 66, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 66, __pyx_L1_error) - if (!__pyx_t_5) { - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } else { - __Pyx_INCREF(__pyx_t_4); - __pyx_t_1 = __pyx_t_4; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - goto __pyx_L3_bool_binop_done; - } - __Pyx_INCREF(__pyx_kp_u_features); - __pyx_t_1 = __pyx_kp_u_features; - __pyx_L3_bool_binop_done:; - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_line); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 66, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = NULL; - __pyx_t_7 = 0; 
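- /* The block below is Cython's bound-method fast-call pattern: when the
-    FeatureLibLocation global resolves to a PyMethod, its self is unpacked and
-    passed as the first vectorcall argument so no intermediate argument tuple
-    is built. The interned constant __pyx_kp_u_features is the fallback used
-    when self.filename_ is falsy (presumably a "<features>" placeholder string
-    elided from the quoted Python source above). */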
- if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[4] = {__pyx_t_6, __pyx_t_1, __pyx_t_4, __pyx_v_column}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_7, 3+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 66, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":64 - * return (token_type, token, location) - * - * def location_(self): # <<<<<<<<<<<<<< - * column = self.pos_ - self.line_start_ + 1 - * return FeatureLibLocation(self.filename_ or "", self.line_, column) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.location_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_column); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":68 - * return FeatureLibLocation(self.filename_ or "", self.line_, column) - * - * def next_(self): # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_WHITESPACE_) - * location = self.location_() - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_11next_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_10next_, "Lexer.next_(self)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_11next_ = {"next_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_11next_, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_10next_}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_11next_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("next_ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, 
__pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 68, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "next_") < 0)) __PYX_ERR(0, 68, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_self = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("next_", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 68, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.next_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_10next_(__pyx_self, __pyx_v_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_10next_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_v_location = NULL; - PyObject *__pyx_v_start = NULL; - PyObject *__pyx_v_text = NULL; - Py_ssize_t __pyx_v_limit; - PyObject *__pyx_v_cur_char = NULL; - PyObject *__pyx_v_next_char = NULL; - PyObject *__pyx_v_glyphclass = NULL; - PyObject *__pyx_v_token = NULL; - PyObject *__pyx_v_string = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - Py_ssize_t __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("next_", 0); - - /* "fontTools/feaLib/lexer.py":69 - * - * def next_(self): - * self.scan_over_(Lexer.CHAR_WHITESPACE_) # <<<<<<<<<<<<<< - * location = self.location_() - * start = self.pos_ - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_WHITESPACE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":70 - * def next_(self): - * self.scan_over_(Lexer.CHAR_WHITESPACE_) - * location = 
self.location_() # <<<<<<<<<<<<<< - * start = self.pos_ - * text = self.text_ - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_location); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_4, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_v_location = __pyx_t_1; - __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":71 - * self.scan_over_(Lexer.CHAR_WHITESPACE_) - * location = self.location_() - * start = self.pos_ # <<<<<<<<<<<<<< - * text = self.text_ - * limit = len(text) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_start = __pyx_t_1; - __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":72 - * location = self.location_() - * start = self.pos_ - * text = self.text_ # <<<<<<<<<<<<<< - * limit = len(text) - * if start >= limit: - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_text = __pyx_t_1; - __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":73 - * start = self.pos_ - * text = self.text_ - * limit = len(text) # <<<<<<<<<<<<<< - * if start >= limit: - * raise StopIteration() - */ - __pyx_t_6 = PyObject_Length(__pyx_v_text); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 73, __pyx_L1_error) - __pyx_v_limit = __pyx_t_6; - - /* "fontTools/feaLib/lexer.py":74 - * text = self.text_ - * limit = len(text) - * if start >= limit: # <<<<<<<<<<<<<< - * raise StopIteration() - * cur_char = text[start] - */ - __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_RichCompare(__pyx_v_start, __pyx_t_1, Py_GE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(__pyx_t_7)) { - - /* "fontTools/feaLib/lexer.py":75 - * limit = len(text) - * if start >= limit: - * raise StopIteration() # <<<<<<<<<<<<<< - * cur_char = text[start] - * next_char = text[start + 1] if start + 1 < limit else None - */ - __pyx_t_2 = __Pyx_PyObject_CallNoArg(__pyx_builtin_StopIteration); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 75, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 75, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":74 - * text = self.text_ - * limit = len(text) - * if start >= limit: # <<<<<<<<<<<<<< - * raise StopIteration() - * cur_char = text[start] - */ - } - - /* "fontTools/feaLib/lexer.py":76 - * if start >= limit: - * raise StopIteration() - * cur_char = 
text[start] # <<<<<<<<<<<<<< - * next_char = text[start + 1] if start + 1 < limit else None - * - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_v_start); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_cur_char = __pyx_t_2; - __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":77 - * raise StopIteration() - * cur_char = text[start] - * next_char = text[start + 1] if start + 1 < limit else None # <<<<<<<<<<<<<< - * - * if cur_char == "\n": - */ - __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_4, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_7) { - __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __pyx_t_4; - __pyx_t_4 = 0; - } else { - __Pyx_INCREF(Py_None); - __pyx_t_2 = Py_None; - } - __pyx_v_next_char = __pyx_t_2; - __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":79 - * next_char = text[start + 1] if start + 1 < limit else None - * - * if cur_char == "\n": # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.line_ += 1 - */ - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u_, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 79, __pyx_L1_error) - if (__pyx_t_7) { - - /* "fontTools/feaLib/lexer.py":80 - * - * if cur_char == "\n": - * self.pos_ += 1 # <<<<<<<<<<<<<< - * self.line_ += 1 - * self.line_start_ = self.pos_ - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_2, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_4) < 0) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":81 - * if cur_char == "\n": - * self.pos_ += 1 - * self.line_ += 1 # <<<<<<<<<<<<<< - * self.line_start_ = self.pos_ - * return (Lexer.NEWLINE, None, location) - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_line); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 81, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_4, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 81, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line, __pyx_t_2) < 0) __PYX_ERR(0, 81, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":82 - * self.pos_ += 1 - * self.line_ += 1 - * self.line_start_ = self.pos_ # 
<<<<<<<<<<<<<< - * return (Lexer.NEWLINE, None, location) - * if cur_char == "\r": - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 82, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line_start, __pyx_t_2) < 0) __PYX_ERR(0, 82, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":83 - * self.line_ += 1 - * self.line_start_ = self.pos_ - * return (Lexer.NEWLINE, None, location) # <<<<<<<<<<<<<< - * if cur_char == "\r": - * self.pos_ += 2 if next_char == "\n" else 1 - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_NEWLINE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location); - __pyx_t_4 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":79 - * next_char = text[start + 1] if start + 1 < limit else None - * - * if cur_char == "\n": # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.line_ += 1 - */ - } - - /* "fontTools/feaLib/lexer.py":84 - * self.line_start_ = self.pos_ - * return (Lexer.NEWLINE, None, location) - * if cur_char == "\r": # <<<<<<<<<<<<<< - * self.pos_ += 2 if next_char == "\n" else 1 - * self.line_ += 1 - */ - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__2, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 84, __pyx_L1_error) - if (__pyx_t_7) { - - /* "fontTools/feaLib/lexer.py":85 - * return (Lexer.NEWLINE, None, location) - * if cur_char == "\r": - * self.pos_ += 2 if next_char == "\n" else 1 # <<<<<<<<<<<<<< - * self.line_ += 1 - * self.line_start_ = self.pos_ - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_next_char, __pyx_kp_u_, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 85, __pyx_L1_error) - if (__pyx_t_7) { - __Pyx_INCREF(__pyx_int_2); - __pyx_t_4 = __pyx_int_2; - } else { - __Pyx_INCREF(__pyx_int_1); - __pyx_t_4 = __pyx_int_1; - } - __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_3) < 0) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":86 - * if cur_char == "\r": - * self.pos_ += 2 if next_char == "\n" else 1 - * self.line_ += 1 # <<<<<<<<<<<<<< - * self.line_start_ = self.pos_ - * return (Lexer.NEWLINE, None, location) - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_line); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_3, 
__pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line, __pyx_t_4) < 0) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":87 - * self.pos_ += 2 if next_char == "\n" else 1 - * self.line_ += 1 - * self.line_start_ = self.pos_ # <<<<<<<<<<<<<< - * return (Lexer.NEWLINE, None, location) - * if cur_char == "#": - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_line_start, __pyx_t_4) < 0) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":88 - * self.line_ += 1 - * self.line_start_ = self.pos_ - * return (Lexer.NEWLINE, None, location) # <<<<<<<<<<<<<< - * if cur_char == "#": - * self.scan_until_(Lexer.CHAR_NEWLINE_) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_NEWLINE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location); - __pyx_t_3 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":84 - * self.line_start_ = self.pos_ - * return (Lexer.NEWLINE, None, location) - * if cur_char == "\r": # <<<<<<<<<<<<<< - * self.pos_ += 2 if next_char == "\n" else 1 - * self.line_ += 1 - */ - } - - /* "fontTools/feaLib/lexer.py":89 - * self.line_start_ = self.pos_ - * return (Lexer.NEWLINE, None, location) - * if cur_char == "#": # <<<<<<<<<<<<<< - * self.scan_until_(Lexer.CHAR_NEWLINE_) - * return (Lexer.COMMENT, text[start : self.pos_], location) - */ - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__3, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 89, __pyx_L1_error) - if (__pyx_t_7) { - - /* "fontTools/feaLib/lexer.py":90 - * return (Lexer.NEWLINE, None, location) - * if cur_char == "#": - * self.scan_until_(Lexer.CHAR_NEWLINE_) # <<<<<<<<<<<<<< - * return (Lexer.COMMENT, text[start : self.pos_], location) - * - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_until); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 90, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_NEWLINE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - 
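- /* This call sequence implements self.scan_until_(Lexer.CHAR_NEWLINE_) from
-    the quoted source: the lexer advances to the next newline so that the
-    entire "#"-comment becomes a single COMMENT token covering
-    text[start : self.pos_]. */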
__Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_1}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":91 - * if cur_char == "#": - * self.scan_until_(Lexer.CHAR_NEWLINE_) - * return (Lexer.COMMENT, text[start : self.pos_], location) # <<<<<<<<<<<<<< - * - * if self.mode_ is Lexer.MODE_FILENAME_: - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_COMMENT); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_4, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location); - __pyx_t_3 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":89 - * self.line_start_ = self.pos_ - * return (Lexer.NEWLINE, None, location) - * if cur_char == "#": # <<<<<<<<<<<<<< - * self.scan_until_(Lexer.CHAR_NEWLINE_) - * return (Lexer.COMMENT, text[start : self.pos_], location) - */ - } - - /* "fontTools/feaLib/lexer.py":93 - * return (Lexer.COMMENT, text[start : self.pos_], location) - * - * if self.mode_ is Lexer.MODE_FILENAME_: # <<<<<<<<<<<<<< - * if cur_char != "(": - * raise FeatureLibError("Expected '(' before file name", location) - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_mode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_MODE_FILENAME); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = (__pyx_t_4 == __pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_7) { - - /* "fontTools/feaLib/lexer.py":94 - * - * if self.mode_ is Lexer.MODE_FILENAME_: - * if cur_char != "(": # <<<<<<<<<<<<<< - * raise FeatureLibError("Expected '(' before file name", location) - * self.scan_until_(")") - */ - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__4, Py_NE)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 94, __pyx_L1_error) - if 
(unlikely(__pyx_t_7)) { - - /* "fontTools/feaLib/lexer.py":95 - * if self.mode_ is Lexer.MODE_FILENAME_: - * if cur_char != "(": - * raise FeatureLibError("Expected '(' before file name", location) # <<<<<<<<<<<<<< - * self.scan_until_(")") - * cur_char = text[self.pos_] if self.pos_ < limit else None - */ - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 95, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_kp_u_Expected_before_file_name, __pyx_v_location}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 95, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 95, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":94 - * - * if self.mode_ is Lexer.MODE_FILENAME_: - * if cur_char != "(": # <<<<<<<<<<<<<< - * raise FeatureLibError("Expected '(' before file name", location) - * self.scan_until_(")") - */ - } - - /* "fontTools/feaLib/lexer.py":96 - * if cur_char != "(": - * raise FeatureLibError("Expected '(' before file name", location) - * self.scan_until_(")") # <<<<<<<<<<<<<< - * cur_char = text[self.pos_] if self.pos_ < limit else None - * if cur_char != ")": - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_until); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 96, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_kp_u__5}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":97 - * raise FeatureLibError("Expected '(' before file name", location) - * self.scan_until_(")") - * cur_char = text[self.pos_] if self.pos_ < limit else None # <<<<<<<<<<<<<< - * if cur_char != ")": - * raise FeatureLibError("Expected ')' after file name", location) - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_1, Py_LT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = 
__Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __pyx_t_1; - __pyx_t_1 = 0; - } else { - __Pyx_INCREF(Py_None); - __pyx_t_3 = Py_None; - } - __Pyx_DECREF_SET(__pyx_v_cur_char, __pyx_t_3); - __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":98 - * self.scan_until_(")") - * cur_char = text[self.pos_] if self.pos_ < limit else None - * if cur_char != ")": # <<<<<<<<<<<<<< - * raise FeatureLibError("Expected ')' after file name", location) - * self.pos_ += 1 - */ - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__5, Py_NE)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 98, __pyx_L1_error) - if (unlikely(__pyx_t_7)) { - - /* "fontTools/feaLib/lexer.py":99 - * cur_char = text[self.pos_] if self.pos_ < limit else None - * if cur_char != ")": - * raise FeatureLibError("Expected ')' after file name", location) # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.mode_ = Lexer.MODE_NORMAL_ - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_2, __pyx_kp_u_Expected_after_file_name, __pyx_v_location}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 99, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":98 - * self.scan_until_(")") - * cur_char = text[self.pos_] if self.pos_ < limit else None - * if cur_char != ")": # <<<<<<<<<<<<<< - * raise FeatureLibError("Expected ')' after file name", location) - * self.pos_ += 1 - */ - } - - /* "fontTools/feaLib/lexer.py":100 - * if cur_char != ")": - * raise FeatureLibError("Expected ')' after file name", location) - * self.pos_ += 1 # <<<<<<<<<<<<<< - * self.mode_ = Lexer.MODE_NORMAL_ - * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_1) < 0) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":101 - * raise FeatureLibError("Expected ')' after file name", location) - * self.pos_ += 1 - * 
self.mode_ = Lexer.MODE_NORMAL_ # <<<<<<<<<<<<<< - * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_MODE_NORMAL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_mode, __pyx_t_3) < 0) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":102 - * self.pos_ += 1 - * self.mode_ = Lexer.MODE_NORMAL_ - * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) # <<<<<<<<<<<<<< - * - * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_FILENAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyInt_SubtractObjC(__pyx_t_2, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_t_3, &__pyx_t_4, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":93 - * return (Lexer.COMMENT, text[start : self.pos_], location) - * - * if self.mode_ is Lexer.MODE_FILENAME_: # <<<<<<<<<<<<<< - * if cur_char != "(": - * raise FeatureLibError("Expected '(' before file name", location) - */ - } - - /* "fontTools/feaLib/lexer.py":104 - * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) - * - * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_DIGIT_) - */ - __pyx_t_8 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__6, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 104, __pyx_L1_error) - if (__pyx_t_8) { - } else { - __pyx_t_7 = __pyx_t_8; - goto __pyx_L11_bool_binop_done; - } - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 104, __pyx_L1_error) - 
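- /* Short-circuit evaluation of 'cur_char == "\\" and next_char in
-    Lexer.CHAR_DIGIT_': a true first operand falls through to the membership
-    test (lowered to __Pyx_PySequence_ContainsTF), while a false one jumps
-    straight to __pyx_L11_bool_binop_done with the combined result. */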
__Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_8 = (__Pyx_PySequence_ContainsTF(__pyx_v_next_char, __pyx_t_2, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_7 = __pyx_t_8; - __pyx_L11_bool_binop_done:; - if (__pyx_t_7) { - - /* "fontTools/feaLib/lexer.py":105 - * - * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: - * self.pos_ += 1 # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_2, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_4) < 0) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":106 - * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< - * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) - * if cur_char == "@": - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_3}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":107 - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) # <<<<<<<<<<<<<< - * if cur_char == "@": - * self.pos_ += 1 - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CID); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); 
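- /* CID token: the slice text[start + 1 : self.pos_] built below drops the
-    leading backslash, and int(..., 10) is emitted as a PyInt_Type call with
-    an explicit (string, 10) argument tuple before the
-    (Lexer.CID, value, location) triple is assembled. */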
- __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_t_4, &__pyx_t_3, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_INCREF(__pyx_int_10); - __Pyx_GIVEREF(__pyx_int_10); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_10); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_location); - __pyx_t_2 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":104 - * return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) - * - * if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_DIGIT_) - */ - } - - /* "fontTools/feaLib/lexer.py":108 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) - * if cur_char == "@": # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - */ - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__7, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 108, __pyx_L1_error) - if (__pyx_t_7) { - - /* "fontTools/feaLib/lexer.py":109 - * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) - * if cur_char == "@": - * self.pos_ += 1 # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - * glyphclass = text[start + 1 : self.pos_] - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 109, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_1) < 0) __PYX_ERR(0, 109, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":110 - * if cur_char == "@": - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) # <<<<<<<<<<<<<< - * glyphclass = text[start + 1 : self.pos_] - * if len(glyphclass) < 1: - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 110, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 110, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_NAME_CONTINUATION); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 110, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && 
likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_4}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 110, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":111 - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - * glyphclass = text[start + 1 : self.pos_] # <<<<<<<<<<<<<< - * if len(glyphclass) < 1: - * raise FeatureLibError("Expected glyph class name", location) - */ - __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 111, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_t_1, &__pyx_t_3, NULL, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 111, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_glyphclass = __pyx_t_4; - __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":112 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - * glyphclass = text[start + 1 : self.pos_] - * if len(glyphclass) < 1: # <<<<<<<<<<<<<< - * raise FeatureLibError("Expected glyph class name", location) - * if len(glyphclass) > 63: - */ - __pyx_t_6 = PyObject_Length(__pyx_v_glyphclass); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 112, __pyx_L1_error) - __pyx_t_7 = (__pyx_t_6 < 1); - if (unlikely(__pyx_t_7)) { - - /* "fontTools/feaLib/lexer.py":113 - * glyphclass = text[start + 1 : self.pos_] - * if len(glyphclass) < 1: - * raise FeatureLibError("Expected glyph class name", location) # <<<<<<<<<<<<<< - * if len(glyphclass) > 63: - * raise FeatureLibError( - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 113, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_kp_u_Expected_glyph_class_name, __pyx_v_location}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 113, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 113, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":112 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - * glyphclass = text[start + 1 : self.pos_] - * if len(glyphclass) < 1: # <<<<<<<<<<<<<< - * raise FeatureLibError("Expected glyph class 
name", location) - * if len(glyphclass) > 63: - */ - } - - /* "fontTools/feaLib/lexer.py":114 - * if len(glyphclass) < 1: - * raise FeatureLibError("Expected glyph class name", location) - * if len(glyphclass) > 63: # <<<<<<<<<<<<<< - * raise FeatureLibError( - * "Glyph class names must not be longer than 63 characters", location - */ - __pyx_t_6 = PyObject_Length(__pyx_v_glyphclass); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 114, __pyx_L1_error) - __pyx_t_7 = (__pyx_t_6 > 63); - if (unlikely(__pyx_t_7)) { - - /* "fontTools/feaLib/lexer.py":115 - * raise FeatureLibError("Expected glyph class name", location) - * if len(glyphclass) > 63: - * raise FeatureLibError( # <<<<<<<<<<<<<< - * "Glyph class names must not be longer than 63 characters", location - * ) - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 115, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - - /* "fontTools/feaLib/lexer.py":116 - * if len(glyphclass) > 63: - * raise FeatureLibError( - * "Glyph class names must not be longer than 63 characters", location # <<<<<<<<<<<<<< - * ) - * if not Lexer.RE_GLYPHCLASS.match(glyphclass): - */ - __pyx_t_1 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_kp_u_Glyph_class_names_must_not_be_lo, __pyx_v_location}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 115, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 115, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":114 - * if len(glyphclass) < 1: - * raise FeatureLibError("Expected glyph class name", location) - * if len(glyphclass) > 63: # <<<<<<<<<<<<<< - * raise FeatureLibError( - * "Glyph class names must not be longer than 63 characters", location - */ - } - - /* "fontTools/feaLib/lexer.py":118 - * "Glyph class names must not be longer than 63 characters", location - * ) - * if not Lexer.RE_GLYPHCLASS.match(glyphclass): # <<<<<<<<<<<<<< - * raise FeatureLibError( - * "Glyph class names must consist of letters, digits, " - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 118, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_RE_GLYPHCLASS); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 118, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_match); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 118, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_1, 
__pyx_v_glyphclass}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 118, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 118, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_8 = (!__pyx_t_7); - if (unlikely(__pyx_t_8)) { - - /* "fontTools/feaLib/lexer.py":119 - * ) - * if not Lexer.RE_GLYPHCLASS.match(glyphclass): - * raise FeatureLibError( # <<<<<<<<<<<<<< - * "Glyph class names must consist of letters, digits, " - * "underscore, period or hyphen", - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - - /* "fontTools/feaLib/lexer.py":122 - * "Glyph class names must consist of letters, digits, " - * "underscore, period or hyphen", - * location, # <<<<<<<<<<<<<< - * ) - * return (Lexer.GLYPHCLASS, glyphclass, location) - */ - __pyx_t_1 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_kp_u_Glyph_class_names_must_consist_o, __pyx_v_location}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 119, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":118 - * "Glyph class names must not be longer than 63 characters", location - * ) - * if not Lexer.RE_GLYPHCLASS.match(glyphclass): # <<<<<<<<<<<<<< - * raise FeatureLibError( - * "Glyph class names must consist of letters, digits, " - */ - } - - /* "fontTools/feaLib/lexer.py":124 - * location, - * ) - * return (Lexer.GLYPHCLASS, glyphclass, location) # <<<<<<<<<<<<<< - * if cur_char in Lexer.CHAR_NAME_START_: - * self.pos_ += 1 - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 124, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_GLYPHCLASS); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 124, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 124, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_INCREF(__pyx_v_glyphclass); - __Pyx_GIVEREF(__pyx_v_glyphclass); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_glyphclass); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location); - __pyx_t_3 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":108 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) - * if cur_char == "@": # <<<<<<<<<<<<<< - * self.pos_ += 1 - * 
self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - */ - } - - /* "fontTools/feaLib/lexer.py":125 - * ) - * return (Lexer.GLYPHCLASS, glyphclass, location) - * if cur_char in Lexer.CHAR_NAME_START_: # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - */ - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 125, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_NAME_START); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 125, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_8 = (__Pyx_PySequence_ContainsTF(__pyx_v_cur_char, __pyx_t_3, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 125, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":126 - * return (Lexer.GLYPHCLASS, glyphclass, location) - * if cur_char in Lexer.CHAR_NAME_START_: - * self.pos_ += 1 # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - * token = text[start : self.pos_] - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_4) < 0) __PYX_ERR(0, 126, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":127 - * if cur_char in Lexer.CHAR_NAME_START_: - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) # <<<<<<<<<<<<<< - * token = text[start : self.pos_] - * if token == "include": - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CHAR_NAME_CONTINUATION); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_2}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":128 - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - * token = text[start : self.pos_] # <<<<<<<<<<<<<< - * if token == "include": - * self.mode_ = Lexer.MODE_FILENAME_ - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = 
__Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_4, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_token = __pyx_t_3; - __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":129 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - * token = text[start : self.pos_] - * if token == "include": # <<<<<<<<<<<<<< - * self.mode_ = Lexer.MODE_FILENAME_ - * return (Lexer.NAME, token, location) - */ - __pyx_t_8 = (__Pyx_PyUnicode_Equals(__pyx_v_token, __pyx_n_u_include, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 129, __pyx_L1_error) - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":130 - * token = text[start : self.pos_] - * if token == "include": - * self.mode_ = Lexer.MODE_FILENAME_ # <<<<<<<<<<<<<< - * return (Lexer.NAME, token, location) - * if cur_char == "0" and next_char in "xX": - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_MODE_FILENAME); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_mode, __pyx_t_4) < 0) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":129 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - * token = text[start : self.pos_] - * if token == "include": # <<<<<<<<<<<<<< - * self.mode_ = Lexer.MODE_FILENAME_ - * return (Lexer.NAME, token, location) - */ - } - - /* "fontTools/feaLib/lexer.py":131 - * if token == "include": - * self.mode_ = Lexer.MODE_FILENAME_ - * return (Lexer.NAME, token, location) # <<<<<<<<<<<<<< - * if cur_char == "0" and next_char in "xX": - * self.pos_ += 2 - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 131, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_NAME); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 131, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 131, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_INCREF(__pyx_v_token); - __Pyx_GIVEREF(__pyx_v_token); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_token); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_location); - __pyx_t_3 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":125 - * ) - * return (Lexer.GLYPHCLASS, glyphclass, location) - * if cur_char in Lexer.CHAR_NAME_START_: # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - */ - } - - /* "fontTools/feaLib/lexer.py":132 - * self.mode_ = Lexer.MODE_FILENAME_ - * return (Lexer.NAME, token, location) - * if cur_char == "0" and next_char in "xX": # <<<<<<<<<<<<<< - * self.pos_ += 2 - * self.scan_over_(Lexer.CHAR_HEXDIGIT_) - */ - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u_0, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 132, __pyx_L1_error) - if (__pyx_t_7) { - } else { - __pyx_t_8 = __pyx_t_7; - goto __pyx_L20_bool_binop_done; - } - __pyx_t_7 = 
(__Pyx_PyUnicode_ContainsTF(__pyx_v_next_char, __pyx_n_u_xX, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 132, __pyx_L1_error) - __pyx_t_8 = __pyx_t_7; - __pyx_L20_bool_binop_done:; - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":133 - * return (Lexer.NAME, token, location) - * if cur_char == "0" and next_char in "xX": - * self.pos_ += 2 # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_HEXDIGIT_) - * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_t_4, __pyx_int_2, 2, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_3) < 0) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":134 - * if cur_char == "0" and next_char in "xX": - * self.pos_ += 2 - * self.scan_over_(Lexer.CHAR_HEXDIGIT_) # <<<<<<<<<<<<<< - * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) - * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_HEXDIGIT); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_1}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":135 - * self.pos_ += 2 - * self.scan_over_(Lexer.CHAR_HEXDIGIT_) - * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) # <<<<<<<<<<<<<< - * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: - * self.scan_over_(Lexer.CHAR_DIGIT_) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 135, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_HEXADECIMAL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 135, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 135, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_3, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 135, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 135, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_INCREF(__pyx_int_16); - __Pyx_GIVEREF(__pyx_int_16); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_16); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 135, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 135, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_location); - __pyx_t_4 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":132 - * self.mode_ = Lexer.MODE_FILENAME_ - * return (Lexer.NAME, token, location) - * if cur_char == "0" and next_char in "xX": # <<<<<<<<<<<<<< - * self.pos_ += 2 - * self.scan_over_(Lexer.CHAR_HEXDIGIT_) - */ - } - - /* "fontTools/feaLib/lexer.py":136 - * self.scan_over_(Lexer.CHAR_HEXDIGIT_) - * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) - * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) - */ - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u_0, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 136, __pyx_L1_error) - if (__pyx_t_7) { - } else { - __pyx_t_8 = __pyx_t_7; - goto __pyx_L23_bool_binop_done; - } - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 136, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 136, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_7 = (__Pyx_PySequence_ContainsTF(__pyx_v_next_char, __pyx_t_1, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 136, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_8 = __pyx_t_7; - __pyx_L23_bool_binop_done:; - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":137 - * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) - * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: - * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< - * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) - * if cur_char in Lexer.CHAR_DIGIT_: - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_4)) { 
- PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":138 - * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) # <<<<<<<<<<<<<< - * if cur_char in Lexer.CHAR_DIGIT_: - * self.scan_over_(Lexer.CHAR_DIGIT_) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_OCTAL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); - __Pyx_INCREF(__pyx_int_8); - __Pyx_GIVEREF(__pyx_int_8); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_8); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_location); - __pyx_t_3 = 0; - __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":136 - * self.scan_over_(Lexer.CHAR_HEXDIGIT_) - * return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) - * if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) - */ - } - - /* "fontTools/feaLib/lexer.py":139 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) - * if cur_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_DIGIT_) - * if self.pos_ >= limit or text[self.pos_] != ".": - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 139, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_8 = (__Pyx_PySequence_ContainsTF(__pyx_v_cur_char, __pyx_t_2, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 139, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":140 - * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) - * if cur_char in Lexer.CHAR_DIGIT_: - * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< - * if self.pos_ >= limit or text[self.pos_] != ".": - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":141 - * if cur_char in Lexer.CHAR_DIGIT_: - * self.scan_over_(Lexer.CHAR_DIGIT_) - * if self.pos_ >= limit or text[self.pos_] != ".": # <<<<<<<<<<<<<< - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - * self.scan_over_(".") - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (!__pyx_t_7) { - } else { - __pyx_t_8 = __pyx_t_7; - goto __pyx_L27_bool_binop_done; - } - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_kp_u__8, Py_NE)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_8 = __pyx_t_7; - __pyx_L27_bool_binop_done:; - if 
(__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":142 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * if self.pos_ >= limit or text[self.pos_] != ".": - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) # <<<<<<<<<<<<<< - * self.scan_over_(".") - * self.scan_over_(Lexer.CHAR_DIGIT_) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_NUMBER); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 142, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 142, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); - __Pyx_INCREF(__pyx_int_10); - __Pyx_GIVEREF(__pyx_int_10); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_10); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 142, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_location); - __pyx_t_4 = 0; - __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":141 - * if cur_char in Lexer.CHAR_DIGIT_: - * self.scan_over_(Lexer.CHAR_DIGIT_) - * if self.pos_ >= limit or text[self.pos_] != ".": # <<<<<<<<<<<<<< - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - * self.scan_over_(".") - */ - } - - /* "fontTools/feaLib/lexer.py":143 - * if self.pos_ >= limit or text[self.pos_] != ".": - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - * self.scan_over_(".") # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 143, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_kp_u__8}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
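The generated C above and below expands the numeric branches of `Lexer.next_`. For readability, here is that logic reassembled in pure Python from the `lexer.py` source quoted in the surrounding comments (lines 132-145): hexadecimal (`0x`/`0X` prefix), octal (leading `0` before a digit), then plain decimal, falling into a float when a `.` follows the digit run. The free-function framing, the `None` guard on `next_char`, and the asserts are illustrative additions for a standalone sketch, not fontTools API.

```python
# Sketch of the numeric branches of Lexer.next_ (lexer.py lines 132-145),
# reconstructed from the source quoted in the generated comments.
CHAR_DIGIT_ = "0123456789"
CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef"

def scan_numeric(text, start, limit):
    """Return (token_type, value, end_pos); assumes next_'s dispatch
    already matched one of the numeric branches at `start`."""
    pos = start
    cur_char = text[pos]
    next_char = text[pos + 1] if pos + 1 < limit else None  # guard added for the sketch
    if cur_char == "0" and next_char is not None and next_char in "xX":
        pos += 2                                          # lines 132-135
        while pos < limit and text[pos] in CHAR_HEXDIGIT_:  # scan_over_(CHAR_HEXDIGIT_)
            pos += 1
        return ("HEXADECIMAL", int(text[start:pos], 16), pos)
    if cur_char == "0" and next_char is not None and next_char in CHAR_DIGIT_:
        while pos < limit and text[pos] in CHAR_DIGIT_:   # lines 136-138
            pos += 1
        return ("OCTAL", int(text[start:pos], 8), pos)
    while pos < limit and text[pos] in CHAR_DIGIT_:       # lines 139-140
        pos += 1
    if pos >= limit or text[pos] != ".":                  # line 141
        return ("NUMBER", int(text[start:pos], 10), pos)
    while pos < limit and text[pos] == ".":               # scan_over_("."), line 143
        pos += 1
    while pos < limit and text[pos] in CHAR_DIGIT_:       # line 144
        pos += 1
    return ("FLOAT", float(text[start:pos]), pos)

assert scan_numeric("0x1F ", 0, 5) == ("HEXADECIMAL", 31, 4)
assert scan_numeric("017 ", 0, 4) == ("OCTAL", 15, 3)
assert scan_numeric("12.5 ", 0, 5) == ("FLOAT", 12.5, 4)
```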
- - /* "fontTools/feaLib/lexer.py":144 - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - * self.scan_over_(".") - * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 144, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 144, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 144, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 144, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":145 - * self.scan_over_(".") - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) # <<<<<<<<<<<<<< - * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: - * self.pos_ += 1 - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_FLOAT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 145, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 145, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyNumber_Float(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 145, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_location); - __pyx_t_2 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":139 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) - * if cur_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_DIGIT_) - * if self.pos_ >= limit or 
text[self.pos_] != ".": - */ - } - - /* "fontTools/feaLib/lexer.py":146 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_DIGIT_) - */ - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__9, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 146, __pyx_L1_error) - if (__pyx_t_7) { - } else { - __pyx_t_8 = __pyx_t_7; - goto __pyx_L30_bool_binop_done; - } - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_7 = (__Pyx_PySequence_ContainsTF(__pyx_v_next_char, __pyx_t_1, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_8 = __pyx_t_7; - __pyx_L30_bool_binop_done:; - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":147 - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: - * self.pos_ += 1 # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_DIGIT_) - * if self.pos_ >= limit or text[self.pos_] != ".": - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_3) < 0) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":148 - * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< - * if self.pos_ >= limit or text[self.pos_] != ".": - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_4}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* 
"fontTools/feaLib/lexer.py":149 - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * if self.pos_ >= limit or text[self.pos_] != ".": # <<<<<<<<<<<<<< - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - * self.scan_over_(".") - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 149, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_limit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 149, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_t_1, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 149, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 149, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (!__pyx_t_7) { - } else { - __pyx_t_8 = __pyx_t_7; - goto __pyx_L33_bool_binop_done; - } - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 149, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_text, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 149, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_kp_u__8, Py_NE)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 149, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_8 = __pyx_t_7; - __pyx_L33_bool_binop_done:; - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":150 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * if self.pos_ >= limit or text[self.pos_] != ".": - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) # <<<<<<<<<<<<<< - * self.scan_over_(".") - * self.scan_over_(Lexer.CHAR_DIGIT_) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 150, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_NUMBER); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 150, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 150, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 150, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 150, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); - __Pyx_INCREF(__pyx_int_10); - __Pyx_GIVEREF(__pyx_int_10); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_10); - __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)(&PyInt_Type)), __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 150, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 150, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - 
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_location); - __pyx_t_4 = 0; - __pyx_t_3 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":149 - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * if self.pos_ >= limit or text[self.pos_] != ".": # <<<<<<<<<<<<<< - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - * self.scan_over_(".") - */ - } - - /* "fontTools/feaLib/lexer.py":151 - * if self.pos_ >= limit or text[self.pos_] != ".": - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - * self.scan_over_(".") # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_kp_u__8}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 151, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":152 - * return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - * self.scan_over_(".") - * self.scan_over_(Lexer.CHAR_DIGIT_) # <<<<<<<<<<<<<< - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - * if cur_char in Lexer.CHAR_SYMBOL_: - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHAR_DIGIT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":153 - * self.scan_over_(".") - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) # <<<<<<<<<<<<<< - * if cur_char in Lexer.CHAR_SYMBOL_: - * self.pos_ += 1 - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_FLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_v_start, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyNumber_Float(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location); - __pyx_t_3 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":146 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - * if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.scan_over_(Lexer.CHAR_DIGIT_) - */ - } - - /* "fontTools/feaLib/lexer.py":154 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - * if cur_char in Lexer.CHAR_SYMBOL_: # <<<<<<<<<<<<<< - * self.pos_ += 1 - * return (Lexer.SYMBOL, cur_char, location) - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 154, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_CHAR_SYMBOL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 154, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_8 = (__Pyx_PySequence_ContainsTF(__pyx_v_cur_char, __pyx_t_1, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 154, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":155 - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - * if cur_char in Lexer.CHAR_SYMBOL_: - * self.pos_ += 1 # <<<<<<<<<<<<<< - * return (Lexer.SYMBOL, cur_char, location) - * if cur_char == '"': - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 155, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 155, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_2) < 0) __PYX_ERR(0, 155, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":156 - * if cur_char in Lexer.CHAR_SYMBOL_: - * self.pos_ += 1 - * return (Lexer.SYMBOL, cur_char, location) # <<<<<<<<<<<<<< - * if cur_char == '"': - * self.pos_ += 1 - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - 
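The minus-sign and symbol branches (lexer.py lines 146-156) follow next: a leading `-` immediately before a digit re-enters the same digit/dot scan as above to produce a negative NUMBER or FLOAT, and any character in `Lexer.CHAR_SYMBOL_` becomes a one-character SYMBOL token. A minimal dispatch sketch; `CHAR_SYMBOL_` below is a stand-in set for illustration, since the real definition lives outside this hunk.

```python
CHAR_DIGIT_ = "0123456789"
# Stand-in symbol set for the sketch; the actual Lexer.CHAR_SYMBOL_ is
# defined elsewhere in lexer.py and is not shown in this hunk.
CHAR_SYMBOL_ = ",;:-+'{}[]<>()="

def classify(text, pos, limit):
    """Mirror the dispatch order of lexer.py lines 146-156."""
    cur_char = text[pos]
    next_char = text[pos + 1] if pos + 1 < limit else None
    if cur_char == "-" and next_char is not None and next_char in CHAR_DIGIT_:
        return "NUMBER_OR_FLOAT"   # re-enters the digit/dot scan (lines 147-153)
    if cur_char in CHAR_SYMBOL_:
        return "SYMBOL"            # single-character token (lines 154-156)
    return "OTHER"

assert classify("-12", 0, 3) == "NUMBER_OR_FLOAT"
assert classify("-;", 0, 2) == "SYMBOL"   # bare "-" not followed by a digit
assert classify(";x", 0, 2) == "SYMBOL"
```

Note the ordering matters: because the `-` check runs before the symbol check, `-5` lexes as one negative number token, while a `-` followed by anything else falls through to SYMBOL.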
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_SYMBOL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __Pyx_INCREF(__pyx_v_cur_char); - __Pyx_GIVEREF(__pyx_v_cur_char); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_cur_char); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location); - __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":154 - * self.scan_over_(Lexer.CHAR_DIGIT_) - * return (Lexer.FLOAT, float(text[start : self.pos_]), location) - * if cur_char in Lexer.CHAR_SYMBOL_: # <<<<<<<<<<<<<< - * self.pos_ += 1 - * return (Lexer.SYMBOL, cur_char, location) - */ - } - - /* "fontTools/feaLib/lexer.py":157 - * self.pos_ += 1 - * return (Lexer.SYMBOL, cur_char, location) - * if cur_char == '"': # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.scan_until_('"') - */ - __pyx_t_8 = (__Pyx_PyUnicode_Equals(__pyx_v_cur_char, __pyx_kp_u__10, Py_EQ)); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 157, __pyx_L1_error) - if (__pyx_t_8) { - - /* "fontTools/feaLib/lexer.py":158 - * return (Lexer.SYMBOL, cur_char, location) - * if cur_char == '"': - * self.pos_ += 1 # <<<<<<<<<<<<<< - * self.scan_until_('"') - * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 158, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_t_2, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 158, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_1) < 0) __PYX_ERR(0, 158, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":159 - * if cur_char == '"': - * self.pos_ += 1 - * self.scan_until_('"') # <<<<<<<<<<<<<< - * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': - * self.pos_ += 1 - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_until); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 159, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_kp_u__10}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 159, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":160 - * self.pos_ += 1 - * self.scan_until_('"') - * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': # <<<<<<<<<<<<<< - * self.pos_ += 1 - * # strip newlines embedded within a string - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 
160, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_length); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 160, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 160, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 160, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_7) { - } else { - __pyx_t_8 = __pyx_t_7; - goto __pyx_L38_bool_binop_done; - } - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 160, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 160, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 160, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_kp_u__10, Py_EQ)); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 160, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_8 = __pyx_t_7; - __pyx_L38_bool_binop_done:; - if (likely(__pyx_t_8)) { - - /* "fontTools/feaLib/lexer.py":161 - * self.scan_until_('"') - * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': - * self.pos_ += 1 # <<<<<<<<<<<<<< - * # strip newlines embedded within a string - * string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1]) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 161, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 161, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_2) < 0) __PYX_ERR(0, 161, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":163 - * self.pos_ += 1 - * # strip newlines embedded within a string - * string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1]) # <<<<<<<<<<<<<< - * return (Lexer.STRING, string, location) - * else: - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_re); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_sub); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_start, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_9 = __Pyx_PyInt_SubtractObjC(__pyx_t_4, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_text, 0, 0, &__pyx_t_1, &__pyx_t_9, NULL, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 163, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_9)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_9); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[4] = {__pyx_t_9, __pyx_kp_u__11, __pyx_kp_u__12, __pyx_t_4}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 3+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_v_string = __pyx_t_2; - __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":164 - * # strip newlines embedded within a string - * string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1]) - * return (Lexer.STRING, string, location) # <<<<<<<<<<<<<< - * else: - * raise FeatureLibError("Expected '\"' to terminate string", location) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_STRING); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __Pyx_INCREF(__pyx_v_string); - __Pyx_GIVEREF(__pyx_v_string); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_string); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location); - __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":160 - * self.pos_ += 1 - * self.scan_until_('"') - * if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': # <<<<<<<<<<<<<< - * self.pos_ += 1 - * # strip newlines embedded within a string - */ - } - - /* "fontTools/feaLib/lexer.py":166 - * return (Lexer.STRING, string, location) - * else: - * raise FeatureLibError("Expected '\"' to terminate string", location) # <<<<<<<<<<<<<< - * raise FeatureLibError("Unexpected character: %r" % cur_char, location) - * - */ - /*else*/ { - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 166, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_kp_u_Expected_to_terminate_string, __pyx_v_location}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 166, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - 
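The code around this point compiles the string-literal branch (lexer.py lines 157-166): after the opening quote, `scan_until_('"')` advances to the closing quote; if one is found, embedded CR/LF characters are stripped with `re.sub` before the STRING token is returned, otherwise `FeatureLibError` is raised. A self-contained sketch of that branch, with a stand-in `FeatureLibError` for the import that lives elsewhere in the module:

```python
import re

class FeatureLibError(Exception):
    """Stand-in for fontTools.feaLib.error.FeatureLibError (sketch only)."""

def scan_string(text, pos, location=None):
    """`pos` points at the opening '"'. Returns (string_value, end_pos)."""
    assert text[pos] == '"'
    start = pos
    pos += 1                                         # line 158
    while pos < len(text) and text[pos] != '"':      # scan_until_('"'), line 159
        pos += 1
    if pos < len(text) and text[pos] == '"':         # line 160
        pos += 1
        # strip newlines embedded within a string (line 163)
        string = re.sub("[\r\n]", "", text[start + 1 : pos - 1])
        return string, pos                           # (Lexer.STRING, string, location)
    raise FeatureLibError("Expected '\"' to terminate string", location)

assert scan_string('"a\nb" rest', 0) == ("ab", 5)
```

Anything left unmatched by every branch reaches the final `raise FeatureLibError("Unexpected character: %r" % cur_char, location)` visible just below.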
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 166, __pyx_L1_error) - } - - /* "fontTools/feaLib/lexer.py":157 - * self.pos_ += 1 - * return (Lexer.SYMBOL, cur_char, location) - * if cur_char == '"': # <<<<<<<<<<<<<< - * self.pos_ += 1 - * self.scan_until_('"') - */ - } - - /* "fontTools/feaLib/lexer.py":167 - * else: - * raise FeatureLibError("Expected '\"' to terminate string", location) - * raise FeatureLibError("Unexpected character: %r" % cur_char, location) # <<<<<<<<<<<<<< - * - * def scan_over_(self, valid): - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 167, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_Unexpected_character_r, __pyx_v_cur_char); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 167, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_9 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_9)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_9); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_9, __pyx_t_4, __pyx_v_location}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 167, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 167, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":68 - * return FeatureLibLocation(self.filename_ or "", self.line_, column) - * - * def next_(self): # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_WHITESPACE_) - * location = self.location_() - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.next_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_location); - __Pyx_XDECREF(__pyx_v_start); - __Pyx_XDECREF(__pyx_v_text); - __Pyx_XDECREF(__pyx_v_cur_char); - __Pyx_XDECREF(__pyx_v_next_char); - __Pyx_XDECREF(__pyx_v_glyphclass); - __Pyx_XDECREF(__pyx_v_token); - __Pyx_XDECREF(__pyx_v_string); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":169 - * raise FeatureLibError("Unexpected character: %r" % cur_char, location) - * - * def scan_over_(self, valid): # <<<<<<<<<<<<<< - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] in valid: - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_, "Lexer.scan_over_(self, valid)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_ = {"scan_over_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_, __Pyx_METH_FASTCALL|METH_KEYWORDS, 
__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_valid = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("scan_over_ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_valid,0}; - PyObject* values[2] = {0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 169, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_valid)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 169, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("scan_over_", 1, 2, 2, 1); __PYX_ERR(0, 169, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "scan_over_") < 0)) __PYX_ERR(0, 169, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 2)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - } - __pyx_v_self = values[0]; - __pyx_v_valid = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("scan_over_", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 169, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_over_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_(__pyx_self, __pyx_v_self, __pyx_v_valid); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_12scan_over_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_valid) { - PyObject *__pyx_v_p = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("scan_over_", 0); - - /* "fontTools/feaLib/lexer.py":170 - * - * def scan_over_(self, valid): - * p = self.pos_ # <<<<<<<<<<<<<< - * while p < self.text_length_ and self.text_[p] in valid: - * p 
+= 1 - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_p = __pyx_t_1; - __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":171 - * def scan_over_(self, valid): - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] in valid: # <<<<<<<<<<<<<< - * p += 1 - * self.pos_ = p - */ - while (1) { - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 171, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_p, __pyx_t_1, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 171, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 171, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { - } else { - __pyx_t_2 = __pyx_t_4; - goto __pyx_L5_bool_binop_done; - } - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 171, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_v_p); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 171, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_v_valid, Py_EQ)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 171, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __pyx_t_4; - __pyx_L5_bool_binop_done:; - if (!__pyx_t_2) break; - - /* "fontTools/feaLib/lexer.py":172 - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] in valid: - * p += 1 # <<<<<<<<<<<<<< - * self.pos_ = p - * - */ - __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_p, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 172, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF_SET(__pyx_v_p, __pyx_t_1); - __pyx_t_1 = 0; - } - - /* "fontTools/feaLib/lexer.py":173 - * while p < self.text_length_ and self.text_[p] in valid: - * p += 1 - * self.pos_ = p # <<<<<<<<<<<<<< - * - * def scan_until_(self, stop_at): - */ - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_v_p) < 0) __PYX_ERR(0, 173, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":169 - * raise FeatureLibError("Unexpected character: %r" % cur_char, location) - * - * def scan_over_(self, valid): # <<<<<<<<<<<<<< - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] in valid: - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_over_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_p); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":175 - * self.pos_ = p - * - * def scan_until_(self, stop_at): # <<<<<<<<<<<<<< - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] not in stop_at: - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ 
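`scan_over_` and `scan_until_` are the two cursor helpers the whole tokenizer is built on: both walk `pos_` forward one character at a time, the first while the current character is in the given set, the second until it is. Their pure-Python originals, exactly as quoted in the generated comments (lexer.py lines 169-179), wrapped here in a minimal class of my own so the snippet runs standalone:

```python
class _Scanner:
    """Minimal harness for the two helpers; the real ones live on Lexer."""
    def __init__(self, text):
        self.text_ = text
        self.text_length_ = len(text)
        self.pos_ = 0

    def scan_over_(self, valid):
        """Advance pos_ while the current character is in `valid` (lines 169-173)."""
        p = self.pos_
        while p < self.text_length_ and self.text_[p] in valid:
            p += 1
        self.pos_ = p

    def scan_until_(self, stop_at):
        """Advance pos_ until the current character is in `stop_at` (lines 175-179)."""
        p = self.pos_
        while p < self.text_length_ and self.text_[p] not in stop_at:
            p += 1
        self.pos_ = p

s = _Scanner("123abc")
s.scan_over_("0123456789")   # consumes "123"
assert s.pos_ == 3
s.scan_until_("c")           # skips "ab", stops at "c"
assert s.pos_ == 5
```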
-PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_, "Lexer.scan_until_(self, stop_at)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_ = {"scan_until_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_stop_at = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("scan_until_ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_stop_at,0}; - PyObject* values[2] = {0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 175, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_stop_at)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 175, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("scan_until_", 1, 2, 2, 1); __PYX_ERR(0, 175, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "scan_until_") < 0)) __PYX_ERR(0, 175, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 2)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - } - __pyx_v_self = values[0]; - __pyx_v_stop_at = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("scan_until_", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 175, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_until_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_(__pyx_self, __pyx_v_self, __pyx_v_stop_at); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_14scan_until_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_stop_at) { - PyObject *__pyx_v_p = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = 
NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("scan_until_", 0); - - /* "fontTools/feaLib/lexer.py":176 - * - * def scan_until_(self, stop_at): - * p = self.pos_ # <<<<<<<<<<<<<< - * while p < self.text_length_ and self.text_[p] not in stop_at: - * p += 1 - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_p = __pyx_t_1; - __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":177 - * def scan_until_(self, stop_at): - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] not in stop_at: # <<<<<<<<<<<<<< - * p += 1 - * self.pos_ = p - */ - while (1) { - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_v_p, __pyx_t_1, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 177, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 177, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { - } else { - __pyx_t_2 = __pyx_t_4; - goto __pyx_L5_bool_binop_done; - } - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 177, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_v_p); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_v_stop_at, Py_NE)); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 177, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __pyx_t_4; - __pyx_L5_bool_binop_done:; - if (!__pyx_t_2) break; - - /* "fontTools/feaLib/lexer.py":178 - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] not in stop_at: - * p += 1 # <<<<<<<<<<<<<< - * self.pos_ = p - * - */ - __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_v_p, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF_SET(__pyx_v_p, __pyx_t_1); - __pyx_t_1 = 0; - } - - /* "fontTools/feaLib/lexer.py":179 - * while p < self.text_length_ and self.text_[p] not in stop_at: - * p += 1 - * self.pos_ = p # <<<<<<<<<<<<<< - * - * def scan_anonymous_block(self, tag): - */ - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_v_p) < 0) __PYX_ERR(0, 179, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":175 - * self.pos_ = p - * - * def scan_until_(self, stop_at): # <<<<<<<<<<<<<< - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] not in stop_at: - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_until_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_p); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":181 - * self.pos_ = p - * - * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< - * location = self.location_() - * tag = tag.strip() - */ - -/* 
Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block, "Lexer.scan_anonymous_block(self, tag)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block = {"scan_anonymous_block", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_tag = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("scan_anonymous_block (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_tag,0}; - PyObject* values[2] = {0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_tag)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("scan_anonymous_block", 1, 2, 2, 1); __PYX_ERR(0, 181, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "scan_anonymous_block") < 0)) __PYX_ERR(0, 181, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 2)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - } - __pyx_v_self = values[0]; - __pyx_v_tag = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("scan_anonymous_block", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 181, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_anonymous_block", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block(__pyx_self, 
__pyx_v_self, __pyx_v_tag); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_5Lexer_16scan_anonymous_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_tag) { - PyObject *__pyx_v_location = NULL; - PyObject *__pyx_v_regexp = NULL; - PyObject *__pyx_v_split = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - Py_ssize_t __pyx_t_6; - int __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("scan_anonymous_block", 0); - __Pyx_INCREF(__pyx_v_tag); - - /* "fontTools/feaLib/lexer.py":182 - * - * def scan_anonymous_block(self, tag): - * location = self.location_() # <<<<<<<<<<<<<< - * tag = tag.strip() - * self.scan_until_(Lexer.CHAR_NEWLINE_) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_location); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 182, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 182, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_v_location = __pyx_t_1; - __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":183 - * def scan_anonymous_block(self, tag): - * location = self.location_() - * tag = tag.strip() # <<<<<<<<<<<<<< - * self.scan_until_(Lexer.CHAR_NEWLINE_) - * self.scan_over_(Lexer.CHAR_NEWLINE_) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_tag, __pyx_n_s_strip); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF_SET(__pyx_v_tag, __pyx_t_1); - __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":184 - * location = self.location_() - * tag = tag.strip() - * self.scan_until_(Lexer.CHAR_NEWLINE_) # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_NEWLINE_) - * regexp = r"}\s*" + tag + r"\s*;" - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_until); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 184, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 184, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - 
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_CHAR_NEWLINE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 184, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 184, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":185 - * tag = tag.strip() - * self.scan_until_(Lexer.CHAR_NEWLINE_) - * self.scan_over_(Lexer.CHAR_NEWLINE_) # <<<<<<<<<<<<<< - * regexp = r"}\s*" + tag + r"\s*;" - * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_scan_over); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_CHAR_NEWLINE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "fontTools/feaLib/lexer.py":186 - * self.scan_until_(Lexer.CHAR_NEWLINE_) - * self.scan_over_(Lexer.CHAR_NEWLINE_) - * regexp = r"}\s*" + tag + r"\s*;" # <<<<<<<<<<<<<< - * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) - * if len(split) != 2: - */ - __pyx_t_1 = PyNumber_Add(__pyx_kp_u_s, __pyx_v_tag); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 186, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_kp_u_s_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 186, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_regexp = __pyx_t_2; - __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":187 - * self.scan_over_(Lexer.CHAR_NEWLINE_) - * regexp = r"}\s*" + tag + r"\s*;" - * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) # <<<<<<<<<<<<<< - * if len(split) != 2: - * raise FeatureLibError( - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_re); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); 
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_split); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_text_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyObject_GetSlice(__pyx_t_2, 0, 0, &__pyx_t_3, NULL, NULL, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_regexp); - __Pyx_GIVEREF(__pyx_v_regexp); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_regexp); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_maxsplit, __pyx_int_1) < 0) __PYX_ERR(0, 187, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_split = __pyx_t_2; - __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":188 - * regexp = r"}\s*" + tag + r"\s*;" - * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) - * if len(split) != 2: # <<<<<<<<<<<<<< - * raise FeatureLibError( - * "Expected '} %s;' to terminate anonymous block" % tag, location - */ - __pyx_t_6 = PyObject_Length(__pyx_v_split); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 188, __pyx_L1_error) - __pyx_t_7 = (__pyx_t_6 != 2); - if (unlikely(__pyx_t_7)) { - - /* "fontTools/feaLib/lexer.py":189 - * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) - * if len(split) != 2: - * raise FeatureLibError( # <<<<<<<<<<<<<< - * "Expected '} %s;' to terminate anonymous block" % tag, location - * ) - */ - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 189, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - - /* "fontTools/feaLib/lexer.py":190 - * if len(split) != 2: - * raise FeatureLibError( - * "Expected '} %s;' to terminate anonymous block" % tag, location # <<<<<<<<<<<<<< - * ) - * self.pos_ += len(split[0]) - */ - __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_Expected_s_to_terminate_anonymou, __pyx_v_tag); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 190, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_t_3, __pyx_v_location}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_4, 2+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 189, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 189, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":188 - * regexp = r"}\s*" + tag + r"\s*;" - * split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) - * if len(split) != 2: # <<<<<<<<<<<<<< - * raise FeatureLibError( - * "Expected '} %s;' to terminate anonymous block" % tag, location - */ - } - - /* "fontTools/feaLib/lexer.py":192 - * "Expected '} %s;' to terminate anonymous block" % tag, location - * ) - * self.pos_ += len(split[0]) # <<<<<<<<<<<<<< - * return (Lexer.ANONYMOUS_BLOCK, split[0], location) - * - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_split, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyObject_Length(__pyx_t_5); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 192, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyInt_FromSsize_t(__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_t_2, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pos, __pyx_t_3) < 0) __PYX_ERR(0, 192, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":193 - * ) - * self.pos_ += len(split[0]) - * return (Lexer.ANONYMOUS_BLOCK, split[0], location) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 193, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ANONYMOUS_BLOCK); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 193, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_split, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 193, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 193, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_location); - __pyx_t_5 = 0; - __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":181 - * self.pos_ = p - * - * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< - * location = self.location_() - * tag = tag.strip() - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("fontTools.feaLib.lexer.Lexer.scan_anonymous_block", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_location); - __Pyx_XDECREF(__pyx_v_regexp); - __Pyx_XDECREF(__pyx_v_split); - 
__Pyx_XDECREF(__pyx_v_tag); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":211 - * """ - * - * def __init__(self, featurefile, *, includeDir=None): # <<<<<<<<<<<<<< - * """Initializes an IncludingLexer. - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer___init__, "IncludingLexer.__init__(self, featurefile, *, includeDir=None)\nInitializes an IncludingLexer.\n\n Behavior:\n If includeDir is passed, it will be used to determine the top-level\n include directory to use for all encountered include statements. If it is\n not passed, ``os.path.dirname(featurefile)`` will be considered the\n include directory.\n "); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__ = {"__init__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer___init__}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_featurefile = 0; - PyObject *__pyx_v_includeDir = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_featurefile,&__pyx_n_s_includeDir,0}; - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)((PyObject *)Py_None)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 211, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_featurefile)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 211, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 211, __pyx_L3_error) - } - } - if (kw_args == 1) { - const Py_ssize_t index = 2; - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, *__pyx_pyargnames[index]); - if (value) { values[index] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 211, __pyx_L3_error) - } - if (unlikely(kw_args > 0)) { - const 
Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__init__") < 0)) __PYX_ERR(0, 211, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 2)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - } - __pyx_v_self = values[0]; - __pyx_v_featurefile = values[1]; - __pyx_v_includeDir = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 211, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer___init__(__pyx_self, __pyx_v_self, __pyx_v_featurefile, __pyx_v_includeDir); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_featurefile, PyObject *__pyx_v_includeDir) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__init__", 0); - - /* "fontTools/feaLib/lexer.py":221 - * """ - * - * self.lexers_ = [self.make_lexer_(featurefile)] # <<<<<<<<<<<<<< - * self.featurefilepath = self.lexers_[0].filename_ - * self.includeDir = includeDir - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_make_lexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 221, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_featurefile}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 221, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 221, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __pyx_t_1 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_lexers, __pyx_t_2) < 0) __PYX_ERR(0, 221, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":222 - * - * self.lexers_ = [self.make_lexer_(featurefile)] - * self.featurefilepath = self.lexers_[0].filename_ # <<<<<<<<<<<<<< - * self.includeDir = includeDir - * - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 222, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 222, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_filename_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 222, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_featurefilepath, __pyx_t_2) < 0) __PYX_ERR(0, 222, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":223 - * self.lexers_ = [self.make_lexer_(featurefile)] - * self.featurefilepath = self.lexers_[0].filename_ - * self.includeDir = includeDir # <<<<<<<<<<<<<< - * - * def __iter__(self): - */ - if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_includeDir, __pyx_v_includeDir) < 0) __PYX_ERR(0, 223, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":211 - * """ - * - * def __init__(self, featurefile, *, includeDir=None): # <<<<<<<<<<<<<< - * """Initializes an IncludingLexer. - * - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":225 - * self.includeDir = includeDir - * - * def __iter__(self): # <<<<<<<<<<<<<< - * return self - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__, "IncludingLexer.__iter__(self)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__ = {"__iter__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__iter__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 225, __pyx_L3_error) - else goto 
__pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__iter__") < 0)) __PYX_ERR(0, 225, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_self = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__iter__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 225, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__iter__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__(__pyx_self, __pyx_v_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_2__iter__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__iter__", 0); - - /* "fontTools/feaLib/lexer.py":226 - * - * def __iter__(self): - * return self # <<<<<<<<<<<<<< - * - * def next(self): # Python 2 - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self); - __pyx_r = __pyx_v_self; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":225 - * self.includeDir = includeDir - * - * def __iter__(self): # <<<<<<<<<<<<<< - * return self - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":228 - * return self - * - * def next(self): # Python 2 # <<<<<<<<<<<<<< - * return self.__next__() - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_5next(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_4next, "IncludingLexer.next(self)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_5next = {"next", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_5next, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_4next}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_5next(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("next (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; 
- case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 228, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "next") < 0)) __PYX_ERR(0, 228, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_self = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("next", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 228, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.next", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_4next(__pyx_self, __pyx_v_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_4next(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("next", 0); - - /* "fontTools/feaLib/lexer.py":229 - * - * def next(self): # Python 2 - * return self.__next__() # <<<<<<<<<<<<<< - * - * def __next__(self): # Python 3 - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_next); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 229, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 229, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":228 - * return self - * - * def next(self): # Python 2 # <<<<<<<<<<<<<< - * return self.__next__() - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.next", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":231 - * return self.__next__() - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * while self.lexers_: - * lexer = self.lexers_[-1] - */ - -/* Python 
wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__, "IncludingLexer.__next__(self)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__ = {"__next__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__next__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 231, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__next__") < 0)) __PYX_ERR(0, 231, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_self = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__next__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 231, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__(__pyx_self, __pyx_v_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_6__next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_v_lexer = NULL; - PyObject *__pyx_v_token_type = NULL; - PyObject *__pyx_v_token = NULL; - PyObject *__pyx_v_location = NULL; - PyObject *__pyx_v_fname_type = NULL; - PyObject *__pyx_v_fname_token = NULL; - PyObject *__pyx_v_fname_location = NULL; - PyObject *__pyx_v_path = NULL; - PyObject *__pyx_v_curpath = NULL; - PyObject *__pyx_v_err = NULL; - PyObject 
*__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *(*__pyx_t_10)(PyObject *); - int __pyx_t_11; - int __pyx_t_12; - Py_ssize_t __pyx_t_13; - int __pyx_t_14; - PyObject *__pyx_t_15 = NULL; - int __pyx_t_16; - char const *__pyx_t_17; - PyObject *__pyx_t_18 = NULL; - PyObject *__pyx_t_19 = NULL; - PyObject *__pyx_t_20 = NULL; - PyObject *__pyx_t_21 = NULL; - PyObject *__pyx_t_22 = NULL; - PyObject *__pyx_t_23 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__next__", 0); - - /* "fontTools/feaLib/lexer.py":232 - * - * def __next__(self): # Python 3 - * while self.lexers_: # <<<<<<<<<<<<<< - * lexer = self.lexers_[-1] - * try: - */ - while (1) { - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 232, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 232, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!__pyx_t_2) break; - - /* "fontTools/feaLib/lexer.py":233 - * def __next__(self): # Python 3 - * while self.lexers_: - * lexer = self.lexers_[-1] # <<<<<<<<<<<<<< - * try: - * token_type, token, location = next(lexer) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 233, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, -1L, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 233, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF_SET(__pyx_v_lexer, __pyx_t_3); - __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":234 - * while self.lexers_: - * lexer = self.lexers_[-1] - * try: # <<<<<<<<<<<<<< - * token_type, token, location = next(lexer) - * except StopIteration: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); - __Pyx_XGOTREF(__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_5); - __Pyx_XGOTREF(__pyx_t_6); - /*try:*/ { - - /* "fontTools/feaLib/lexer.py":235 - * lexer = self.lexers_[-1] - * try: - * token_type, token, location = next(lexer) # <<<<<<<<<<<<<< - * except StopIteration: - * self.lexers_.pop() - */ - __pyx_t_3 = __Pyx_PyIter_Next(__pyx_v_lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 235, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_3); - if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { - PyObject* sequence = __pyx_t_3; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 3)) { - if (size > 3) __Pyx_RaiseTooManyValuesError(3); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 235, __pyx_L5_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_7 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_8 = PyTuple_GET_ITEM(sequence, 2); - } else { - __pyx_t_1 = PyList_GET_ITEM(sequence, 0); - __pyx_t_7 = PyList_GET_ITEM(sequence, 1); - __pyx_t_8 = PyList_GET_ITEM(sequence, 2); - } - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(__pyx_t_8); - #else - 
__pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 235, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 235, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 235, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_8); - #endif - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_9 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 235, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_10 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_9); - index = 0; __pyx_t_1 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_1)) goto __pyx_L13_unpacking_failed; - __Pyx_GOTREF(__pyx_t_1); - index = 1; __pyx_t_7 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_7)) goto __pyx_L13_unpacking_failed; - __Pyx_GOTREF(__pyx_t_7); - index = 2; __pyx_t_8 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_8)) goto __pyx_L13_unpacking_failed; - __Pyx_GOTREF(__pyx_t_8); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_10(__pyx_t_9), 3) < 0) __PYX_ERR(0, 235, __pyx_L5_error) - __pyx_t_10 = NULL; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L14_unpacking_done; - __pyx_L13_unpacking_failed:; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_10 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 235, __pyx_L5_error) - __pyx_L14_unpacking_done:; - } - __Pyx_XDECREF_SET(__pyx_v_token_type, __pyx_t_1); - __pyx_t_1 = 0; - __Pyx_XDECREF_SET(__pyx_v_token, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_XDECREF_SET(__pyx_v_location, __pyx_t_8); - __pyx_t_8 = 0; - - /* "fontTools/feaLib/lexer.py":234 - * while self.lexers_: - * lexer = self.lexers_[-1] - * try: # <<<<<<<<<<<<<< - * token_type, token, location = next(lexer) - * except StopIteration: - */ - } - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - goto __pyx_L12_try_end; - __pyx_L5_error:; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":236 - * try: - * token_type, token, location = next(lexer) - * except StopIteration: # <<<<<<<<<<<<<< - * self.lexers_.pop() - * continue - */ - __pyx_t_11 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_StopIteration); - if (__pyx_t_11) { - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_8, &__pyx_t_7) < 0) __PYX_ERR(0, 236, __pyx_L7_except_error) - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_7); - - /* "fontTools/feaLib/lexer.py":237 - * token_type, token, location = next(lexer) - * except StopIteration: - * self.lexers_.pop() # <<<<<<<<<<<<<< - * continue - * if token_type is Lexer.NAME and token == "include": - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 237, __pyx_L7_except_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = __Pyx_PyObject_Pop(__pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 237, __pyx_L7_except_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* 
"fontTools/feaLib/lexer.py":238 - * except StopIteration: - * self.lexers_.pop() - * continue # <<<<<<<<<<<<<< - * if token_type is Lexer.NAME and token == "include": - * fname_type, fname_token, fname_location = lexer.next() - */ - goto __pyx_L15_except_continue; - __pyx_L15_except_continue:; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - goto __pyx_L11_try_continue; - } - goto __pyx_L7_except_error; - - /* "fontTools/feaLib/lexer.py":234 - * while self.lexers_: - * lexer = self.lexers_[-1] - * try: # <<<<<<<<<<<<<< - * token_type, token, location = next(lexer) - * except StopIteration: - */ - __pyx_L7_except_error:; - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_XGIVEREF(__pyx_t_6); - __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); - goto __pyx_L1_error; - __pyx_L11_try_continue:; - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_XGIVEREF(__pyx_t_6); - __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); - goto __pyx_L3_continue; - __pyx_L12_try_end:; - } - - /* "fontTools/feaLib/lexer.py":239 - * self.lexers_.pop() - * continue - * if token_type is Lexer.NAME and token == "include": # <<<<<<<<<<<<<< - * fname_type, fname_token, fname_location = lexer.next() - * if fname_type is not Lexer.FILENAME: - */ - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 239, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_NAME); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 239, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_12 = (__pyx_v_token_type == __pyx_t_8); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (__pyx_t_12) { - } else { - __pyx_t_2 = __pyx_t_12; - goto __pyx_L18_bool_binop_done; - } - __pyx_t_12 = (__Pyx_PyUnicode_Equals(__pyx_v_token, __pyx_n_u_include, Py_EQ)); if (unlikely((__pyx_t_12 < 0))) __PYX_ERR(0, 239, __pyx_L1_error) - __pyx_t_2 = __pyx_t_12; - __pyx_L18_bool_binop_done:; - if (__pyx_t_2) { - - /* "fontTools/feaLib/lexer.py":240 - * continue - * if token_type is Lexer.NAME and token == "include": - * fname_type, fname_token, fname_location = lexer.next() # <<<<<<<<<<<<<< - * if fname_type is not Lexer.FILENAME: - * raise FeatureLibError("Expected file name", fname_location) - */ - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_lexer, __pyx_n_s_next_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_3 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_8 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_11, 0+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - if ((likely(PyTuple_CheckExact(__pyx_t_8))) || (PyList_CheckExact(__pyx_t_8))) { - PyObject* sequence = __pyx_t_8; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 3)) { - if (size > 3) __Pyx_RaiseTooManyValuesError(3); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - 
__PYX_ERR(0, 240, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_7 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_9 = PyTuple_GET_ITEM(sequence, 2); - } else { - __pyx_t_7 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); - __pyx_t_9 = PyList_GET_ITEM(sequence, 2); - } - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_9); - #else - __pyx_t_7 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_9 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - #endif - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_1 = PyObject_GetIter(__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_10 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); - index = 0; __pyx_t_7 = __pyx_t_10(__pyx_t_1); if (unlikely(!__pyx_t_7)) goto __pyx_L20_unpacking_failed; - __Pyx_GOTREF(__pyx_t_7); - index = 1; __pyx_t_3 = __pyx_t_10(__pyx_t_1); if (unlikely(!__pyx_t_3)) goto __pyx_L20_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - index = 2; __pyx_t_9 = __pyx_t_10(__pyx_t_1); if (unlikely(!__pyx_t_9)) goto __pyx_L20_unpacking_failed; - __Pyx_GOTREF(__pyx_t_9); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_10(__pyx_t_1), 3) < 0) __PYX_ERR(0, 240, __pyx_L1_error) - __pyx_t_10 = NULL; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L21_unpacking_done; - __pyx_L20_unpacking_failed:; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_10 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 240, __pyx_L1_error) - __pyx_L21_unpacking_done:; - } - __Pyx_XDECREF_SET(__pyx_v_fname_type, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_XDECREF_SET(__pyx_v_fname_token, __pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF_SET(__pyx_v_fname_location, __pyx_t_9); - __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":241 - * if token_type is Lexer.NAME and token == "include": - * fname_type, fname_token, fname_location = lexer.next() - * if fname_type is not Lexer.FILENAME: # <<<<<<<<<<<<<< - * raise FeatureLibError("Expected file name", fname_location) - * # semi_type, semi_token, semi_location = lexer.next() - */ - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 241, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_FILENAME); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 241, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_2 = (__pyx_v_fname_type != __pyx_t_9); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (unlikely(__pyx_t_2)) { - - /* "fontTools/feaLib/lexer.py":242 - * fname_type, fname_token, fname_location = lexer.next() - * if fname_type is not Lexer.FILENAME: - * raise FeatureLibError("Expected file name", fname_location) # <<<<<<<<<<<<<< - * # semi_type, semi_token, semi_location = lexer.next() - * # if semi_type is not Lexer.SYMBOL or semi_token != ";": - */ - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 242, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_8); - __pyx_t_3 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_kp_u_Expected_file_name, __pyx_v_fname_location}; - __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_11, 2+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 242, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __Pyx_Raise(__pyx_t_9, 0, 0, 0); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __PYX_ERR(0, 242, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":241 - * if token_type is Lexer.NAME and token == "include": - * fname_type, fname_token, fname_location = lexer.next() - * if fname_type is not Lexer.FILENAME: # <<<<<<<<<<<<<< - * raise FeatureLibError("Expected file name", fname_location) - * # semi_type, semi_token, semi_location = lexer.next() - */ - } - - /* "fontTools/feaLib/lexer.py":246 - * # if semi_type is not Lexer.SYMBOL or semi_token != ";": - * # raise FeatureLibError("Expected ';'", semi_location) - * if os.path.isabs(fname_token): # <<<<<<<<<<<<<< - * path = fname_token - * else: - */ - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_os); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_path); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_isabs); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_fname_token}; - __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_11, 1+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (__pyx_t_2) { - - /* "fontTools/feaLib/lexer.py":247 - * # raise FeatureLibError("Expected ';'", semi_location) - * if os.path.isabs(fname_token): - * path = fname_token # <<<<<<<<<<<<<< - * else: - * if self.includeDir is not None: - */ - __Pyx_INCREF(__pyx_v_fname_token); - __Pyx_XDECREF_SET(__pyx_v_path, __pyx_v_fname_token); - - /* "fontTools/feaLib/lexer.py":246 - * # if semi_type is not Lexer.SYMBOL or semi_token != ";": - * # raise FeatureLibError("Expected ';'", semi_location) - * if os.path.isabs(fname_token): # <<<<<<<<<<<<<< - * path = fname_token - * else: - */ - goto __pyx_L23; - } - - /* "fontTools/feaLib/lexer.py":249 - * path = fname_token - * else: - * if self.includeDir is 
not None: # <<<<<<<<<<<<<< - * curpath = self.includeDir - * elif self.featurefilepath is not None: - */ - /*else*/ { - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_includeDir); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_2 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (__pyx_t_2) { - - /* "fontTools/feaLib/lexer.py":250 - * else: - * if self.includeDir is not None: - * curpath = self.includeDir # <<<<<<<<<<<<<< - * elif self.featurefilepath is not None: - * curpath = os.path.dirname(self.featurefilepath) - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_includeDir); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 250, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_XDECREF_SET(__pyx_v_curpath, __pyx_t_9); - __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":249 - * path = fname_token - * else: - * if self.includeDir is not None: # <<<<<<<<<<<<<< - * curpath = self.includeDir - * elif self.featurefilepath is not None: - */ - goto __pyx_L24; - } - - /* "fontTools/feaLib/lexer.py":251 - * if self.includeDir is not None: - * curpath = self.includeDir - * elif self.featurefilepath is not None: # <<<<<<<<<<<<<< - * curpath = os.path.dirname(self.featurefilepath) - * else: - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_featurefilepath); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_2 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (__pyx_t_2) { - - /* "fontTools/feaLib/lexer.py":252 - * curpath = self.includeDir - * elif self.featurefilepath is not None: - * curpath = os.path.dirname(self.featurefilepath) # <<<<<<<<<<<<<< - * else: - * # if the IncludingLexer was initialized from an in-memory - */ - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_os); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 252, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_path); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 252, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_dirname); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 252, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_featurefilepath); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 252, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_11, 1+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 252, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __Pyx_XDECREF_SET(__pyx_v_curpath, __pyx_t_9); - __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":251 - * if self.includeDir is not None: - * curpath = self.includeDir - * elif self.featurefilepath is not None: # <<<<<<<<<<<<<< - * curpath = os.path.dirname(self.featurefilepath) - * else: - 
*/ - goto __pyx_L24; - } - - /* "fontTools/feaLib/lexer.py":258 - * # its filesystem path, therefore we fall back to using the - * # current working directory to resolve relative includes - * curpath = os.getcwd() # <<<<<<<<<<<<<< - * path = os.path.join(curpath, fname_token) - * if len(self.lexers_) >= 5: - */ - /*else*/ { - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_os); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_getcwd); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_8, }; - __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_11, 0+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_XDECREF_SET(__pyx_v_curpath, __pyx_t_9); - __pyx_t_9 = 0; - } - __pyx_L24:; - - /* "fontTools/feaLib/lexer.py":259 - * # current working directory to resolve relative includes - * curpath = os.getcwd() - * path = os.path.join(curpath, fname_token) # <<<<<<<<<<<<<< - * if len(self.lexers_) >= 5: - * raise FeatureLibError("Too many recursive includes", fname_location) - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_os); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 259, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_path); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 259, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_join); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 259, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_8, __pyx_v_curpath, __pyx_v_fname_token}; - __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_11, 2+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 259, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_XDECREF_SET(__pyx_v_path, __pyx_t_9); - __pyx_t_9 = 0; - } - __pyx_L23:; - - /* "fontTools/feaLib/lexer.py":260 - * curpath = os.getcwd() - * path = os.path.join(curpath, fname_token) - * if len(self.lexers_) >= 5: # <<<<<<<<<<<<<< - * raise FeatureLibError("Too many recursive includes", fname_location) - * try: - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 260, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_13 = PyObject_Length(__pyx_t_9); if (unlikely(__pyx_t_13 == ((Py_ssize_t)-1))) 
__PYX_ERR(0, 260, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_2 = (__pyx_t_13 >= 5); - if (unlikely(__pyx_t_2)) { - - /* "fontTools/feaLib/lexer.py":261 - * path = os.path.join(curpath, fname_token) - * if len(self.lexers_) >= 5: - * raise FeatureLibError("Too many recursive includes", fname_location) # <<<<<<<<<<<<<< - * try: - * self.lexers_.append(self.make_lexer_(path)) - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 261, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_8, __pyx_kp_u_Too_many_recursive_includes, __pyx_v_fname_location}; - __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_11, 2+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 261, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_Raise(__pyx_t_9, 0, 0, 0); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __PYX_ERR(0, 261, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":260 - * curpath = os.getcwd() - * path = os.path.join(curpath, fname_token) - * if len(self.lexers_) >= 5: # <<<<<<<<<<<<<< - * raise FeatureLibError("Too many recursive includes", fname_location) - * try: - */ - } - - /* "fontTools/feaLib/lexer.py":262 - * if len(self.lexers_) >= 5: - * raise FeatureLibError("Too many recursive includes", fname_location) - * try: # <<<<<<<<<<<<<< - * self.lexers_.append(self.make_lexer_(path)) - * except FileNotFoundError as err: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_5, &__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_6); - __Pyx_XGOTREF(__pyx_t_5); - __Pyx_XGOTREF(__pyx_t_4); - /*try:*/ { - - /* "fontTools/feaLib/lexer.py":263 - * raise FeatureLibError("Too many recursive includes", fname_location) - * try: - * self.lexers_.append(self.make_lexer_(path)) # <<<<<<<<<<<<<< - * except FileNotFoundError as err: - * raise IncludedFeaNotFound(fname_token, fname_location) from err - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 263, __pyx_L26_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_make_lexer); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 263, __pyx_L26_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_7 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_v_path}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_11, 1+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 263, __pyx_L26_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __pyx_t_14 = __Pyx_PyObject_Append(__pyx_t_9, __pyx_t_3); if (unlikely(__pyx_t_14 == ((int)-1))) 
__PYX_ERR(0, 263, __pyx_L26_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":262 - * if len(self.lexers_) >= 5: - * raise FeatureLibError("Too many recursive includes", fname_location) - * try: # <<<<<<<<<<<<<< - * self.lexers_.append(self.make_lexer_(path)) - * except FileNotFoundError as err: - */ - } - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - goto __pyx_L33_try_end; - __pyx_L26_error:; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":264 - * try: - * self.lexers_.append(self.make_lexer_(path)) - * except FileNotFoundError as err: # <<<<<<<<<<<<<< - * raise IncludedFeaNotFound(fname_token, fname_location) from err - * else: - */ - __Pyx_ErrFetch(&__pyx_t_3, &__pyx_t_9, &__pyx_t_8); - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_FileNotFoundError); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 264, __pyx_L28_except_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_11 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_3, __pyx_t_7); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_ErrRestore(__pyx_t_3, __pyx_t_9, __pyx_t_8); - __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_8 = 0; - if (__pyx_t_11) { - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_9, &__pyx_t_3) < 0) __PYX_ERR(0, 264, __pyx_L28_except_error) - __Pyx_XGOTREF(__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_9); - __pyx_v_err = __pyx_t_9; - /*try:*/ { - - /* "fontTools/feaLib/lexer.py":265 - * self.lexers_.append(self.make_lexer_(path)) - * except FileNotFoundError as err: - * raise IncludedFeaNotFound(fname_token, fname_location) from err # <<<<<<<<<<<<<< - * else: - * return (token_type, token, location) - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_IncludedFeaNotFound); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 265, __pyx_L39_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_15 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_15)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_15); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_15, __pyx_v_fname_token, __pyx_v_fname_location}; - __pyx_t_7 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_11, 2+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; - if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 265, __pyx_L39_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __Pyx_Raise(__pyx_t_7, 0, 0, __pyx_v_err); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __PYX_ERR(0, 265, __pyx_L39_error) - } - - /* "fontTools/feaLib/lexer.py":264 - * try: - * self.lexers_.append(self.make_lexer_(path)) - * except FileNotFoundError as err: # <<<<<<<<<<<<<< - * raise IncludedFeaNotFound(fname_token, fname_location) from err - * else: - */ - /*finally:*/ { - __pyx_L39_error:; - /*exception exit:*/{ - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; 
__pyx_t_23 = 0; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_21, &__pyx_t_22, &__pyx_t_23); - if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_18, &__pyx_t_19, &__pyx_t_20) < 0)) __Pyx_ErrFetch(&__pyx_t_18, &__pyx_t_19, &__pyx_t_20); - __Pyx_XGOTREF(__pyx_t_18); - __Pyx_XGOTREF(__pyx_t_19); - __Pyx_XGOTREF(__pyx_t_20); - __Pyx_XGOTREF(__pyx_t_21); - __Pyx_XGOTREF(__pyx_t_22); - __Pyx_XGOTREF(__pyx_t_23); - __pyx_t_11 = __pyx_lineno; __pyx_t_16 = __pyx_clineno; __pyx_t_17 = __pyx_filename; - { - __Pyx_DECREF(__pyx_v_err); __pyx_v_err = 0; - } - if (PY_MAJOR_VERSION >= 3) { - __Pyx_XGIVEREF(__pyx_t_21); - __Pyx_XGIVEREF(__pyx_t_22); - __Pyx_XGIVEREF(__pyx_t_23); - __Pyx_ExceptionReset(__pyx_t_21, __pyx_t_22, __pyx_t_23); - } - __Pyx_XGIVEREF(__pyx_t_18); - __Pyx_XGIVEREF(__pyx_t_19); - __Pyx_XGIVEREF(__pyx_t_20); - __Pyx_ErrRestore(__pyx_t_18, __pyx_t_19, __pyx_t_20); - __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0; - __pyx_lineno = __pyx_t_11; __pyx_clineno = __pyx_t_16; __pyx_filename = __pyx_t_17; - goto __pyx_L28_except_error; - } - } - } - goto __pyx_L28_except_error; - - /* "fontTools/feaLib/lexer.py":262 - * if len(self.lexers_) >= 5: - * raise FeatureLibError("Too many recursive includes", fname_location) - * try: # <<<<<<<<<<<<<< - * self.lexers_.append(self.make_lexer_(path)) - * except FileNotFoundError as err: - */ - __pyx_L28_except_error:; - __Pyx_XGIVEREF(__pyx_t_6); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_5, __pyx_t_4); - goto __pyx_L1_error; - __pyx_L33_try_end:; - } - - /* "fontTools/feaLib/lexer.py":239 - * self.lexers_.pop() - * continue - * if token_type is Lexer.NAME and token == "include": # <<<<<<<<<<<<<< - * fname_type, fname_token, fname_location = lexer.next() - * if fname_type is not Lexer.FILENAME: - */ - goto __pyx_L17; - } - - /* "fontTools/feaLib/lexer.py":267 - * raise IncludedFeaNotFound(fname_token, fname_location) from err - * else: - * return (token_type, token, location) # <<<<<<<<<<<<<< - * raise StopIteration() - * - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 267, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_token_type); - __Pyx_GIVEREF(__pyx_v_token_type); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_token_type); - __Pyx_INCREF(__pyx_v_token); - __Pyx_GIVEREF(__pyx_v_token); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_token); - __Pyx_INCREF(__pyx_v_location); - __Pyx_GIVEREF(__pyx_v_location); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_location); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - } - __pyx_L17:; - __pyx_L3_continue:; - } - - /* "fontTools/feaLib/lexer.py":268 - * else: - * return (token_type, token, location) - * raise StopIteration() # <<<<<<<<<<<<<< - * - * @staticmethod - */ - __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_builtin_StopIteration); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 268, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 268, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":231 - * return self.__next__() - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * while self.lexers_: - * lexer = self.lexers_[-1] - */ - - /* function exit code */ - __pyx_L1_error:; - 
__Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_15); - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_lexer); - __Pyx_XDECREF(__pyx_v_token_type); - __Pyx_XDECREF(__pyx_v_token); - __Pyx_XDECREF(__pyx_v_location); - __Pyx_XDECREF(__pyx_v_fname_type); - __Pyx_XDECREF(__pyx_v_fname_token); - __Pyx_XDECREF(__pyx_v_fname_location); - __Pyx_XDECREF(__pyx_v_path); - __Pyx_XDECREF(__pyx_v_curpath); - __Pyx_XDECREF(__pyx_v_err); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":270 - * raise StopIteration() - * - * @staticmethod # <<<<<<<<<<<<<< - * def make_lexer_(file_or_path): - * if hasattr(file_or_path, "read"): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_, "IncludingLexer.make_lexer_(file_or_path)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_ = {"make_lexer_", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_file_or_path = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("make_lexer_ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_file_or_path,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_file_or_path)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 270, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "make_lexer_") < 0)) __PYX_ERR(0, 270, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_file_or_path = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - 
__Pyx_RaiseArgtupleInvalid("make_lexer_", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 270, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.make_lexer_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_(__pyx_self, __pyx_v_file_or_path); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_8make_lexer_(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_file_or_path) { - PyObject *__pyx_v_fileobj = NULL; - int __pyx_v_closing; - PyObject *__pyx_v_filename = NULL; - PyObject *__pyx_v_data = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("make_lexer_", 0); - - /* "fontTools/feaLib/lexer.py":272 - * @staticmethod - * def make_lexer_(file_or_path): - * if hasattr(file_or_path, "read"): # <<<<<<<<<<<<<< - * fileobj, closing = file_or_path, False - * else: - */ - __pyx_t_1 = __Pyx_HasAttr(__pyx_v_file_or_path, __pyx_n_u_read); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 272, __pyx_L1_error) - if (__pyx_t_1) { - - /* "fontTools/feaLib/lexer.py":273 - * def make_lexer_(file_or_path): - * if hasattr(file_or_path, "read"): - * fileobj, closing = file_or_path, False # <<<<<<<<<<<<<< - * else: - * filename, closing = file_or_path, True - */ - __pyx_t_2 = __pyx_v_file_or_path; - __Pyx_INCREF(__pyx_t_2); - __pyx_t_1 = 0; - __pyx_v_fileobj = __pyx_t_2; - __pyx_t_2 = 0; - __pyx_v_closing = __pyx_t_1; - - /* "fontTools/feaLib/lexer.py":272 - * @staticmethod - * def make_lexer_(file_or_path): - * if hasattr(file_or_path, "read"): # <<<<<<<<<<<<<< - * fileobj, closing = file_or_path, False - * else: - */ - goto __pyx_L3; - } - - /* "fontTools/feaLib/lexer.py":275 - * fileobj, closing = file_or_path, False - * else: - * filename, closing = file_or_path, True # <<<<<<<<<<<<<< - * fileobj = open(filename, "r", encoding="utf-8") - * data = fileobj.read() - */ - /*else*/ { - __pyx_t_2 = __pyx_v_file_or_path; - __Pyx_INCREF(__pyx_t_2); - __pyx_t_1 = 1; - __pyx_v_filename = __pyx_t_2; - __pyx_t_2 = 0; - __pyx_v_closing = __pyx_t_1; - - /* "fontTools/feaLib/lexer.py":276 - * else: - * filename, closing = file_or_path, True - * fileobj = open(filename, "r", encoding="utf-8") # <<<<<<<<<<<<<< - * data = fileobj.read() - * filename = getattr(fileobj, "name", None) - */ - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 276, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_filename); - __Pyx_GIVEREF(__pyx_v_filename); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_filename); - __Pyx_INCREF(__pyx_n_u_r); - __Pyx_GIVEREF(__pyx_n_u_r); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_n_u_r); - __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 276, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_encoding, __pyx_kp_u_utf_8) < 0) __PYX_ERR(0, 276, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_open, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 276, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_fileobj = __pyx_t_4; - __pyx_t_4 = 0; - } - __pyx_L3:; - - /* "fontTools/feaLib/lexer.py":277 - * filename, closing = file_or_path, True - * fileobj = open(filename, "r", encoding="utf-8") - * data = fileobj.read() # <<<<<<<<<<<<<< - * filename = getattr(fileobj, "name", None) - * if closing: - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_fileobj, __pyx_n_s_read); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_2, }; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_v_data = __pyx_t_4; - __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":278 - * fileobj = open(filename, "r", encoding="utf-8") - * data = fileobj.read() - * filename = getattr(fileobj, "name", None) # <<<<<<<<<<<<<< - * if closing: - * fileobj.close() - */ - __pyx_t_4 = __Pyx_GetAttr3(__pyx_v_fileobj, __pyx_n_u_name, Py_None); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 278, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_XDECREF_SET(__pyx_v_filename, __pyx_t_4); - __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":279 - * data = fileobj.read() - * filename = getattr(fileobj, "name", None) - * if closing: # <<<<<<<<<<<<<< - * fileobj.close() - * return Lexer(data, filename) - */ - if (__pyx_v_closing) { - - /* "fontTools/feaLib/lexer.py":280 - * filename = getattr(fileobj, "name", None) - * if closing: - * fileobj.close() # <<<<<<<<<<<<<< - * return Lexer(data, filename) - * - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_fileobj, __pyx_n_s_close); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 280, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_2, }; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 280, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "fontTools/feaLib/lexer.py":279 - * data = fileobj.read() - * filename = getattr(fileobj, "name", None) - * if closing: # <<<<<<<<<<<<<< - * fileobj.close() - * return Lexer(data, filename) - */ - } - - /* "fontTools/feaLib/lexer.py":281 - * if closing: - * fileobj.close() - * return Lexer(data, filename) # <<<<<<<<<<<<<< - * - * def scan_anonymous_block(self, tag): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Lexer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 281, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = NULL; - __pyx_t_5 = 0; - if 
(CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_2, __pyx_v_data, __pyx_v_filename}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 281, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":270 - * raise StopIteration() - * - * @staticmethod # <<<<<<<<<<<<<< - * def make_lexer_(file_or_path): - * if hasattr(file_or_path, "read"): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.make_lexer_", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_fileobj); - __Pyx_XDECREF(__pyx_v_filename); - __Pyx_XDECREF(__pyx_v_data); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":283 - * return Lexer(data, filename) - * - * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< - * return self.lexers_[-1].scan_anonymous_block(tag) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block, "IncludingLexer.scan_anonymous_block(self, tag)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block = {"scan_anonymous_block", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - PyObject *__pyx_v_tag = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("scan_anonymous_block (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_tag,0}; - PyObject* values[2] = {0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 283, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_tag)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 283, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("scan_anonymous_block", 1, 2, 2, 1); __PYX_ERR(0, 283, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "scan_anonymous_block") < 0)) __PYX_ERR(0, 283, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 2)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - } - __pyx_v_self = values[0]; - __pyx_v_tag = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("scan_anonymous_block", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 283, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.scan_anonymous_block", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block(__pyx_self, __pyx_v_self, __pyx_v_tag); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_14IncludingLexer_10scan_anonymous_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_tag) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("scan_anonymous_block", 0); - - /* "fontTools/feaLib/lexer.py":284 - * - * def scan_anonymous_block(self, tag): - * return self.lexers_[-1].scan_anonymous_block(tag) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 284, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, -1L, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 284, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_scan_anonymous_block); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 284, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_tag}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - 
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 284, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":283 - * return Lexer(data, filename) - * - * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< - * return self.lexers_[-1].scan_anonymous_block(tag) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("fontTools.feaLib.lexer.IncludingLexer.scan_anonymous_block", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "fontTools/feaLib/lexer.py":290 - * """Lexer that does not follow `include` statements, emits them as-is.""" - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * return next(self.lexers_[0]) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__, "NonIncludingLexer.__next__(self)"); -static PyMethodDef __pyx_mdef_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__ = {"__next__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__}; -static PyObject *__pyx_pw_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_self = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__next__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_self)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 290, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__next__") < 0)) __PYX_ERR(0, 290, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_self = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - 
__pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__next__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 290, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("fontTools.feaLib.lexer.NonIncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__(__pyx_self, __pyx_v_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_9fontTools_6feaLib_5lexer_17NonIncludingLexer___next__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__next__", 0); - - /* "fontTools/feaLib/lexer.py":291 - * - * def __next__(self): # Python 3 - * return next(self.lexers_[0]) # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lexers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyIter_Next(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "fontTools/feaLib/lexer.py":290 - * """Lexer that does not follow `include` statements, emits them as-is.""" - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * return next(self.lexers_[0]) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("fontTools.feaLib.lexer.NonIncludingLexer.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif -/* #### Code section: pystring_table ### */ - -static int __Pyx_CreateStringTabAndInitStrings(void) { - __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_u_, __pyx_k_, sizeof(__pyx_k_), 0, 1, 0, 0}, - {&__pyx_kp_u_0, __pyx_k_0, sizeof(__pyx_k_0), 0, 1, 0, 0}, - {&__pyx_kp_u_0123456789, __pyx_k_0123456789, sizeof(__pyx_k_0123456789), 0, 1, 0, 0}, - {&__pyx_kp_u_0123456789ABCDEFabcdef, __pyx_k_0123456789ABCDEFabcdef, sizeof(__pyx_k_0123456789ABCDEFabcdef), 0, 1, 0, 0}, - {&__pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef, __pyx_k_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef, sizeof(__pyx_k_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef), 0, 1, 0, 1}, - {&__pyx_n_s_ANONYMOUS_BLOCK, __pyx_k_ANONYMOUS_BLOCK, sizeof(__pyx_k_ANONYMOUS_BLOCK), 0, 0, 1, 1}, - {&__pyx_n_u_ANONYMOUS_BLOCK, __pyx_k_ANONYMOUS_BLOCK, sizeof(__pyx_k_ANONYMOUS_BLOCK), 0, 1, 0, 1}, - {&__pyx_kp_s_A_Lexer_that_follows_include_sta, __pyx_k_A_Lexer_that_follows_include_sta, sizeof(__pyx_k_A_Lexer_that_follows_include_sta), 0, 0, 1, 
0}, - {&__pyx_kp_u_A_Za_z_0_9, __pyx_k_A_Za_z_0_9, sizeof(__pyx_k_A_Za_z_0_9), 0, 1, 0, 0}, - {&__pyx_n_s_CHAR_DIGIT, __pyx_k_CHAR_DIGIT, sizeof(__pyx_k_CHAR_DIGIT), 0, 0, 1, 1}, - {&__pyx_n_s_CHAR_HEXDIGIT, __pyx_k_CHAR_HEXDIGIT, sizeof(__pyx_k_CHAR_HEXDIGIT), 0, 0, 1, 1}, - {&__pyx_n_s_CHAR_LETTER, __pyx_k_CHAR_LETTER, sizeof(__pyx_k_CHAR_LETTER), 0, 0, 1, 1}, - {&__pyx_n_s_CHAR_NAME_CONTINUATION, __pyx_k_CHAR_NAME_CONTINUATION, sizeof(__pyx_k_CHAR_NAME_CONTINUATION), 0, 0, 1, 1}, - {&__pyx_n_s_CHAR_NAME_START, __pyx_k_CHAR_NAME_START, sizeof(__pyx_k_CHAR_NAME_START), 0, 0, 1, 1}, - {&__pyx_n_s_CHAR_NEWLINE, __pyx_k_CHAR_NEWLINE, sizeof(__pyx_k_CHAR_NEWLINE), 0, 0, 1, 1}, - {&__pyx_n_s_CHAR_SYMBOL, __pyx_k_CHAR_SYMBOL, sizeof(__pyx_k_CHAR_SYMBOL), 0, 0, 1, 1}, - {&__pyx_n_s_CHAR_WHITESPACE, __pyx_k_CHAR_WHITESPACE, sizeof(__pyx_k_CHAR_WHITESPACE), 0, 0, 1, 1}, - {&__pyx_n_s_CID, __pyx_k_CID, sizeof(__pyx_k_CID), 0, 0, 1, 1}, - {&__pyx_n_u_CID, __pyx_k_CID, sizeof(__pyx_k_CID), 0, 1, 0, 1}, - {&__pyx_n_s_COMMENT, __pyx_k_COMMENT, sizeof(__pyx_k_COMMENT), 0, 0, 1, 1}, - {&__pyx_n_u_COMMENT, __pyx_k_COMMENT, sizeof(__pyx_k_COMMENT), 0, 1, 0, 1}, - {&__pyx_kp_u_Expected_after_file_name, __pyx_k_Expected_after_file_name, sizeof(__pyx_k_Expected_after_file_name), 0, 1, 0, 0}, - {&__pyx_kp_u_Expected_before_file_name, __pyx_k_Expected_before_file_name, sizeof(__pyx_k_Expected_before_file_name), 0, 1, 0, 0}, - {&__pyx_kp_u_Expected_file_name, __pyx_k_Expected_file_name, sizeof(__pyx_k_Expected_file_name), 0, 1, 0, 0}, - {&__pyx_kp_u_Expected_glyph_class_name, __pyx_k_Expected_glyph_class_name, sizeof(__pyx_k_Expected_glyph_class_name), 0, 1, 0, 0}, - {&__pyx_kp_u_Expected_s_to_terminate_anonymou, __pyx_k_Expected_s_to_terminate_anonymou, sizeof(__pyx_k_Expected_s_to_terminate_anonymou), 0, 1, 0, 0}, - {&__pyx_kp_u_Expected_to_terminate_string, __pyx_k_Expected_to_terminate_string, sizeof(__pyx_k_Expected_to_terminate_string), 0, 1, 0, 0}, - {&__pyx_n_s_FILENAME, __pyx_k_FILENAME, sizeof(__pyx_k_FILENAME), 0, 0, 1, 1}, - {&__pyx_n_u_FILENAME, __pyx_k_FILENAME, sizeof(__pyx_k_FILENAME), 0, 1, 0, 1}, - {&__pyx_n_s_FLOAT, __pyx_k_FLOAT, sizeof(__pyx_k_FLOAT), 0, 0, 1, 1}, - {&__pyx_n_u_FLOAT, __pyx_k_FLOAT, sizeof(__pyx_k_FLOAT), 0, 1, 0, 1}, - {&__pyx_n_s_FeatureLibError, __pyx_k_FeatureLibError, sizeof(__pyx_k_FeatureLibError), 0, 0, 1, 1}, - {&__pyx_n_s_FeatureLibLocation, __pyx_k_FeatureLibLocation, sizeof(__pyx_k_FeatureLibLocation), 0, 0, 1, 1}, - {&__pyx_n_s_FileNotFoundError, __pyx_k_FileNotFoundError, sizeof(__pyx_k_FileNotFoundError), 0, 0, 1, 1}, - {&__pyx_n_s_GLYPHCLASS, __pyx_k_GLYPHCLASS, sizeof(__pyx_k_GLYPHCLASS), 0, 0, 1, 1}, - {&__pyx_n_u_GLYPHCLASS, __pyx_k_GLYPHCLASS, sizeof(__pyx_k_GLYPHCLASS), 0, 1, 0, 1}, - {&__pyx_kp_u_Glyph_class_names_must_consist_o, __pyx_k_Glyph_class_names_must_consist_o, sizeof(__pyx_k_Glyph_class_names_must_consist_o), 0, 1, 0, 0}, - {&__pyx_kp_u_Glyph_class_names_must_not_be_lo, __pyx_k_Glyph_class_names_must_not_be_lo, sizeof(__pyx_k_Glyph_class_names_must_not_be_lo), 0, 1, 0, 0}, - {&__pyx_n_s_HEXADECIMAL, __pyx_k_HEXADECIMAL, sizeof(__pyx_k_HEXADECIMAL), 0, 0, 1, 1}, - {&__pyx_n_u_HEXADECIMAL, __pyx_k_HEXADECIMAL, sizeof(__pyx_k_HEXADECIMAL), 0, 1, 0, 1}, - {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, - {&__pyx_n_s_IncludedFeaNotFound, __pyx_k_IncludedFeaNotFound, sizeof(__pyx_k_IncludedFeaNotFound), 0, 0, 1, 1}, - {&__pyx_n_s_IncludingLexer, __pyx_k_IncludingLexer, sizeof(__pyx_k_IncludingLexer), 0, 0, 1, 
1}, - {&__pyx_n_s_IncludingLexer___init, __pyx_k_IncludingLexer___init, sizeof(__pyx_k_IncludingLexer___init), 0, 0, 1, 1}, - {&__pyx_n_s_IncludingLexer___iter, __pyx_k_IncludingLexer___iter, sizeof(__pyx_k_IncludingLexer___iter), 0, 0, 1, 1}, - {&__pyx_n_s_IncludingLexer___next, __pyx_k_IncludingLexer___next, sizeof(__pyx_k_IncludingLexer___next), 0, 0, 1, 1}, - {&__pyx_n_s_IncludingLexer_make_lexer, __pyx_k_IncludingLexer_make_lexer, sizeof(__pyx_k_IncludingLexer_make_lexer), 0, 0, 1, 1}, - {&__pyx_n_s_IncludingLexer_next, __pyx_k_IncludingLexer_next, sizeof(__pyx_k_IncludingLexer_next), 0, 0, 1, 1}, - {&__pyx_n_s_IncludingLexer_scan_anonymous_bl, __pyx_k_IncludingLexer_scan_anonymous_bl, sizeof(__pyx_k_IncludingLexer_scan_anonymous_bl), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer, __pyx_k_Lexer, sizeof(__pyx_k_Lexer), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer___init, __pyx_k_Lexer___init, sizeof(__pyx_k_Lexer___init), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer___iter, __pyx_k_Lexer___iter, sizeof(__pyx_k_Lexer___iter), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer___next, __pyx_k_Lexer___next, sizeof(__pyx_k_Lexer___next), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer_location, __pyx_k_Lexer_location, sizeof(__pyx_k_Lexer_location), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer_next, __pyx_k_Lexer_next, sizeof(__pyx_k_Lexer_next), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer_next_2, __pyx_k_Lexer_next_2, sizeof(__pyx_k_Lexer_next_2), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer_scan_anonymous_block, __pyx_k_Lexer_scan_anonymous_block, sizeof(__pyx_k_Lexer_scan_anonymous_block), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer_scan_over, __pyx_k_Lexer_scan_over, sizeof(__pyx_k_Lexer_scan_over), 0, 0, 1, 1}, - {&__pyx_n_s_Lexer_scan_until, __pyx_k_Lexer_scan_until, sizeof(__pyx_k_Lexer_scan_until), 0, 0, 1, 1}, - {&__pyx_kp_s_Lexer_that_does_not_follow_inclu, __pyx_k_Lexer_that_does_not_follow_inclu, sizeof(__pyx_k_Lexer_that_does_not_follow_inclu), 0, 0, 1, 0}, - {&__pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_k_Lib_fontTools_feaLib_lexer_py, sizeof(__pyx_k_Lib_fontTools_feaLib_lexer_py), 0, 0, 1, 0}, - {&__pyx_n_s_MODE_FILENAME, __pyx_k_MODE_FILENAME, sizeof(__pyx_k_MODE_FILENAME), 0, 0, 1, 1}, - {&__pyx_n_s_MODE_NORMAL, __pyx_k_MODE_NORMAL, sizeof(__pyx_k_MODE_NORMAL), 0, 0, 1, 1}, - {&__pyx_n_s_NAME, __pyx_k_NAME, sizeof(__pyx_k_NAME), 0, 0, 1, 1}, - {&__pyx_n_u_NAME, __pyx_k_NAME, sizeof(__pyx_k_NAME), 0, 1, 0, 1}, - {&__pyx_n_s_NEWLINE, __pyx_k_NEWLINE, sizeof(__pyx_k_NEWLINE), 0, 0, 1, 1}, - {&__pyx_n_u_NEWLINE, __pyx_k_NEWLINE, sizeof(__pyx_k_NEWLINE), 0, 1, 0, 1}, - {&__pyx_n_u_NORMAL, __pyx_k_NORMAL, sizeof(__pyx_k_NORMAL), 0, 1, 0, 1}, - {&__pyx_n_s_NUMBER, __pyx_k_NUMBER, sizeof(__pyx_k_NUMBER), 0, 0, 1, 1}, - {&__pyx_n_u_NUMBER, __pyx_k_NUMBER, sizeof(__pyx_k_NUMBER), 0, 1, 0, 1}, - {&__pyx_n_s_NUMBERS, __pyx_k_NUMBERS, sizeof(__pyx_k_NUMBERS), 0, 0, 1, 1}, - {&__pyx_n_s_NonIncludingLexer, __pyx_k_NonIncludingLexer, sizeof(__pyx_k_NonIncludingLexer), 0, 0, 1, 1}, - {&__pyx_n_s_NonIncludingLexer___next, __pyx_k_NonIncludingLexer___next, sizeof(__pyx_k_NonIncludingLexer___next), 0, 0, 1, 1}, - {&__pyx_n_s_OCTAL, __pyx_k_OCTAL, sizeof(__pyx_k_OCTAL), 0, 0, 1, 1}, - {&__pyx_n_u_OCTAL, __pyx_k_OCTAL, sizeof(__pyx_k_OCTAL), 0, 1, 0, 1}, - {&__pyx_n_s_RE_GLYPHCLASS, __pyx_k_RE_GLYPHCLASS, sizeof(__pyx_k_RE_GLYPHCLASS), 0, 0, 1, 1}, - {&__pyx_n_s_STRING, __pyx_k_STRING, sizeof(__pyx_k_STRING), 0, 0, 1, 1}, - {&__pyx_n_u_STRING, __pyx_k_STRING, sizeof(__pyx_k_STRING), 0, 1, 0, 1}, - {&__pyx_n_s_SYMBOL, __pyx_k_SYMBOL, sizeof(__pyx_k_SYMBOL), 0, 0, 1, 1}, - {&__pyx_n_u_SYMBOL, 
__pyx_k_SYMBOL, sizeof(__pyx_k_SYMBOL), 0, 1, 0, 1}, - {&__pyx_n_s_StopIteration, __pyx_k_StopIteration, sizeof(__pyx_k_StopIteration), 0, 0, 1, 1}, - {&__pyx_kp_u_Too_many_recursive_includes, __pyx_k_Too_many_recursive_includes, sizeof(__pyx_k_Too_many_recursive_includes), 0, 1, 0, 0}, - {&__pyx_kp_u_Unexpected_character_r, __pyx_k_Unexpected_character_r, sizeof(__pyx_k_Unexpected_character_r), 0, 1, 0, 0}, - {&__pyx_kp_u__10, __pyx_k__10, sizeof(__pyx_k__10), 0, 1, 0, 0}, - {&__pyx_kp_u__11, __pyx_k__11, sizeof(__pyx_k__11), 0, 1, 0, 0}, - {&__pyx_kp_u__12, __pyx_k__12, sizeof(__pyx_k__12), 0, 1, 0, 0}, - {&__pyx_n_s__13, __pyx_k__13, sizeof(__pyx_k__13), 0, 0, 1, 1}, - {&__pyx_kp_u__16, __pyx_k__16, sizeof(__pyx_k__16), 0, 1, 0, 0}, - {&__pyx_kp_u__17, __pyx_k__17, sizeof(__pyx_k__17), 0, 1, 0, 0}, - {&__pyx_kp_u__18, __pyx_k__18, sizeof(__pyx_k__18), 0, 1, 0, 0}, - {&__pyx_kp_u__19, __pyx_k__19, sizeof(__pyx_k__19), 0, 1, 0, 0}, - {&__pyx_kp_u__2, __pyx_k__2, sizeof(__pyx_k__2), 0, 1, 0, 0}, - {&__pyx_kp_u__20, __pyx_k__20, sizeof(__pyx_k__20), 0, 1, 0, 0}, - {&__pyx_kp_u__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 1, 0, 0}, - {&__pyx_kp_u__4, __pyx_k__4, sizeof(__pyx_k__4), 0, 1, 0, 0}, - {&__pyx_kp_u__5, __pyx_k__5, sizeof(__pyx_k__5), 0, 1, 0, 0}, - {&__pyx_n_s__51, __pyx_k__51, sizeof(__pyx_k__51), 0, 0, 1, 1}, - {&__pyx_kp_u__6, __pyx_k__6, sizeof(__pyx_k__6), 0, 1, 0, 0}, - {&__pyx_kp_u__7, __pyx_k__7, sizeof(__pyx_k__7), 0, 1, 0, 0}, - {&__pyx_kp_u__8, __pyx_k__8, sizeof(__pyx_k__8), 0, 1, 0, 0}, - {&__pyx_kp_u__9, __pyx_k__9, sizeof(__pyx_k__9), 0, 1, 0, 0}, - {&__pyx_n_s_append, __pyx_k_append, sizeof(__pyx_k_append), 0, 0, 1, 1}, - {&__pyx_n_s_asyncio_coroutines, __pyx_k_asyncio_coroutines, sizeof(__pyx_k_asyncio_coroutines), 0, 0, 1, 1}, - {&__pyx_n_s_class_getitem, __pyx_k_class_getitem, sizeof(__pyx_k_class_getitem), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_n_s_close, __pyx_k_close, sizeof(__pyx_k_close), 0, 0, 1, 1}, - {&__pyx_n_s_closing, __pyx_k_closing, sizeof(__pyx_k_closing), 0, 0, 1, 1}, - {&__pyx_n_s_column, __pyx_k_column, sizeof(__pyx_k_column), 0, 0, 1, 1}, - {&__pyx_n_s_compile, __pyx_k_compile, sizeof(__pyx_k_compile), 0, 0, 1, 1}, - {&__pyx_n_s_cur_char, __pyx_k_cur_char, sizeof(__pyx_k_cur_char), 0, 0, 1, 1}, - {&__pyx_n_s_curpath, __pyx_k_curpath, sizeof(__pyx_k_curpath), 0, 0, 1, 1}, - {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, - {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, - {&__pyx_n_s_dirname, __pyx_k_dirname, sizeof(__pyx_k_dirname), 0, 0, 1, 1}, - {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, - {&__pyx_n_s_encoding, __pyx_k_encoding, sizeof(__pyx_k_encoding), 0, 0, 1, 1}, - {&__pyx_n_s_err, __pyx_k_err, sizeof(__pyx_k_err), 0, 0, 1, 1}, - {&__pyx_n_s_featurefile, __pyx_k_featurefile, sizeof(__pyx_k_featurefile), 0, 0, 1, 1}, - {&__pyx_n_s_featurefilepath, __pyx_k_featurefilepath, sizeof(__pyx_k_featurefilepath), 0, 0, 1, 1}, - {&__pyx_kp_u_features, __pyx_k_features, sizeof(__pyx_k_features), 0, 1, 0, 0}, - {&__pyx_n_s_file_or_path, __pyx_k_file_or_path, sizeof(__pyx_k_file_or_path), 0, 0, 1, 1}, - {&__pyx_n_s_filename, __pyx_k_filename, sizeof(__pyx_k_filename), 0, 0, 1, 1}, - {&__pyx_n_s_filename_2, __pyx_k_filename_2, sizeof(__pyx_k_filename_2), 0, 0, 1, 1}, - {&__pyx_n_s_fileobj, __pyx_k_fileobj, sizeof(__pyx_k_fileobj), 0, 0, 1, 1}, - {&__pyx_n_s_fname_location, __pyx_k_fname_location, 
sizeof(__pyx_k_fname_location), 0, 0, 1, 1}, - {&__pyx_n_s_fname_token, __pyx_k_fname_token, sizeof(__pyx_k_fname_token), 0, 0, 1, 1}, - {&__pyx_n_s_fname_type, __pyx_k_fname_type, sizeof(__pyx_k_fname_type), 0, 0, 1, 1}, - {&__pyx_n_s_fontTools_feaLib_error, __pyx_k_fontTools_feaLib_error, sizeof(__pyx_k_fontTools_feaLib_error), 0, 0, 1, 1}, - {&__pyx_n_s_fontTools_feaLib_lexer, __pyx_k_fontTools_feaLib_lexer, sizeof(__pyx_k_fontTools_feaLib_lexer), 0, 0, 1, 1}, - {&__pyx_n_s_fontTools_feaLib_location, __pyx_k_fontTools_feaLib_location, sizeof(__pyx_k_fontTools_feaLib_location), 0, 0, 1, 1}, - {&__pyx_n_s_getcwd, __pyx_k_getcwd, sizeof(__pyx_k_getcwd), 0, 0, 1, 1}, - {&__pyx_n_s_glyphclass, __pyx_k_glyphclass, sizeof(__pyx_k_glyphclass), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_u_include, __pyx_k_include, sizeof(__pyx_k_include), 0, 1, 0, 1}, - {&__pyx_n_s_includeDir, __pyx_k_includeDir, sizeof(__pyx_k_includeDir), 0, 0, 1, 1}, - {&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1}, - {&__pyx_n_s_init_subclass, __pyx_k_init_subclass, sizeof(__pyx_k_init_subclass), 0, 0, 1, 1}, - {&__pyx_n_s_initializing, __pyx_k_initializing, sizeof(__pyx_k_initializing), 0, 0, 1, 1}, - {&__pyx_n_s_is_coroutine, __pyx_k_is_coroutine, sizeof(__pyx_k_is_coroutine), 0, 0, 1, 1}, - {&__pyx_n_s_isabs, __pyx_k_isabs, sizeof(__pyx_k_isabs), 0, 0, 1, 1}, - {&__pyx_n_s_iter, __pyx_k_iter, sizeof(__pyx_k_iter), 0, 0, 1, 1}, - {&__pyx_n_s_join, __pyx_k_join, sizeof(__pyx_k_join), 0, 0, 1, 1}, - {&__pyx_n_s_lexer, __pyx_k_lexer, sizeof(__pyx_k_lexer), 0, 0, 1, 1}, - {&__pyx_n_s_lexers, __pyx_k_lexers, sizeof(__pyx_k_lexers), 0, 0, 1, 1}, - {&__pyx_n_s_limit, __pyx_k_limit, sizeof(__pyx_k_limit), 0, 0, 1, 1}, - {&__pyx_n_s_line, __pyx_k_line, sizeof(__pyx_k_line), 0, 0, 1, 1}, - {&__pyx_n_s_line_start, __pyx_k_line_start, sizeof(__pyx_k_line_start), 0, 0, 1, 1}, - {&__pyx_n_s_location, __pyx_k_location, sizeof(__pyx_k_location), 0, 0, 1, 1}, - {&__pyx_n_s_location_2, __pyx_k_location_2, sizeof(__pyx_k_location_2), 0, 0, 1, 1}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_make_lexer, __pyx_k_make_lexer, sizeof(__pyx_k_make_lexer), 0, 0, 1, 1}, - {&__pyx_n_s_match, __pyx_k_match, sizeof(__pyx_k_match), 0, 0, 1, 1}, - {&__pyx_n_s_maxsplit, __pyx_k_maxsplit, sizeof(__pyx_k_maxsplit), 0, 0, 1, 1}, - {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, - {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, - {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, - {&__pyx_n_s_mro_entries, __pyx_k_mro_entries, sizeof(__pyx_k_mro_entries), 0, 0, 1, 1}, - {&__pyx_n_u_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 1, 0, 1}, - {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, - {&__pyx_n_s_next, __pyx_k_next, sizeof(__pyx_k_next), 0, 0, 1, 1}, - {&__pyx_n_s_next_2, __pyx_k_next_2, sizeof(__pyx_k_next_2), 0, 0, 1, 1}, - {&__pyx_n_s_next_3, __pyx_k_next_3, sizeof(__pyx_k_next_3), 0, 0, 1, 1}, - {&__pyx_n_s_next_char, __pyx_k_next_char, sizeof(__pyx_k_next_char), 0, 0, 1, 1}, - {&__pyx_n_s_object, __pyx_k_object, sizeof(__pyx_k_object), 0, 0, 1, 1}, - {&__pyx_n_s_open, __pyx_k_open, sizeof(__pyx_k_open), 0, 0, 1, 1}, - {&__pyx_n_s_os, __pyx_k_os, sizeof(__pyx_k_os), 0, 0, 1, 1}, - {&__pyx_n_s_p, __pyx_k_p, sizeof(__pyx_k_p), 0, 0, 1, 1}, - {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1}, - {&__pyx_n_s_pop, __pyx_k_pop, 
sizeof(__pyx_k_pop), 0, 0, 1, 1}, - {&__pyx_n_s_pos, __pyx_k_pos, sizeof(__pyx_k_pos), 0, 0, 1, 1}, - {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, - {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, - {&__pyx_n_u_r, __pyx_k_r, sizeof(__pyx_k_r), 0, 1, 0, 1}, - {&__pyx_n_s_re, __pyx_k_re, sizeof(__pyx_k_re), 0, 0, 1, 1}, - {&__pyx_n_s_read, __pyx_k_read, sizeof(__pyx_k_read), 0, 0, 1, 1}, - {&__pyx_n_u_read, __pyx_k_read, sizeof(__pyx_k_read), 0, 1, 0, 1}, - {&__pyx_n_s_regexp, __pyx_k_regexp, sizeof(__pyx_k_regexp), 0, 0, 1, 1}, - {&__pyx_kp_u_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 1, 0, 0}, - {&__pyx_kp_u_s_2, __pyx_k_s_2, sizeof(__pyx_k_s_2), 0, 1, 0, 0}, - {&__pyx_n_s_scan_anonymous_block, __pyx_k_scan_anonymous_block, sizeof(__pyx_k_scan_anonymous_block), 0, 0, 1, 1}, - {&__pyx_n_s_scan_over, __pyx_k_scan_over, sizeof(__pyx_k_scan_over), 0, 0, 1, 1}, - {&__pyx_n_s_scan_until, __pyx_k_scan_until, sizeof(__pyx_k_scan_until), 0, 0, 1, 1}, - {&__pyx_n_s_self, __pyx_k_self, sizeof(__pyx_k_self), 0, 0, 1, 1}, - {&__pyx_n_s_set_name, __pyx_k_set_name, sizeof(__pyx_k_set_name), 0, 0, 1, 1}, - {&__pyx_n_s_spec, __pyx_k_spec, sizeof(__pyx_k_spec), 0, 0, 1, 1}, - {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1}, - {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, - {&__pyx_n_s_staticmethod, __pyx_k_staticmethod, sizeof(__pyx_k_staticmethod), 0, 0, 1, 1}, - {&__pyx_n_s_stop_at, __pyx_k_stop_at, sizeof(__pyx_k_stop_at), 0, 0, 1, 1}, - {&__pyx_n_s_string, __pyx_k_string, sizeof(__pyx_k_string), 0, 0, 1, 1}, - {&__pyx_n_s_strip, __pyx_k_strip, sizeof(__pyx_k_strip), 0, 0, 1, 1}, - {&__pyx_n_s_sub, __pyx_k_sub, sizeof(__pyx_k_sub), 0, 0, 1, 1}, - {&__pyx_n_s_super, __pyx_k_super, sizeof(__pyx_k_super), 0, 0, 1, 1}, - {&__pyx_n_s_tag, __pyx_k_tag, sizeof(__pyx_k_tag), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_n_s_text, __pyx_k_text, sizeof(__pyx_k_text), 0, 0, 1, 1}, - {&__pyx_n_s_text_2, __pyx_k_text_2, sizeof(__pyx_k_text_2), 0, 0, 1, 1}, - {&__pyx_n_s_text_length, __pyx_k_text_length, sizeof(__pyx_k_text_length), 0, 0, 1, 1}, - {&__pyx_n_s_token, __pyx_k_token, sizeof(__pyx_k_token), 0, 0, 1, 1}, - {&__pyx_n_s_token_type, __pyx_k_token_type, sizeof(__pyx_k_token_type), 0, 0, 1, 1}, - {&__pyx_kp_u_utf_8, __pyx_k_utf_8, sizeof(__pyx_k_utf_8), 0, 1, 0, 0}, - {&__pyx_n_s_valid, __pyx_k_valid, sizeof(__pyx_k_valid), 0, 0, 1, 1}, - {&__pyx_n_u_xX, __pyx_k_xX, sizeof(__pyx_k_xX), 0, 1, 0, 1}, - {0, 0, 0, 0, 0, 0, 0} - }; - return __Pyx_InitStrings(__pyx_string_tab); -} -/* #### Code section: cached_builtins ### */ -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(0, 8, __pyx_L1_error) - __pyx_builtin_object = __Pyx_GetBuiltinName(__pyx_n_s_object); if (!__pyx_builtin_object) __PYX_ERR(0, 13, __pyx_L1_error) - __pyx_builtin_staticmethod = __Pyx_GetBuiltinName(__pyx_n_s_staticmethod); if (!__pyx_builtin_staticmethod) __PYX_ERR(0, 270, __pyx_L1_error) - __pyx_builtin_StopIteration = __Pyx_GetBuiltinName(__pyx_n_s_StopIteration); if (!__pyx_builtin_StopIteration) __PYX_ERR(0, 75, __pyx_L1_error) - __pyx_builtin_open = __Pyx_GetBuiltinName(__pyx_n_s_open); if (!__pyx_builtin_open) __PYX_ERR(0, 276, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} -/* #### Code section: cached_constants ### */ - -static CYTHON_SMALL_CODE int 
__Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "fontTools/feaLib/lexer.py":13 - * - * - * class Lexer(object): # <<<<<<<<<<<<<< - * NUMBER = "NUMBER" - * HEXADECIMAL = "HEXADECIMAL" - */ - __pyx_tuple__14 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__14); - __Pyx_GIVEREF(__pyx_tuple__14); - __pyx_tuple__15 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__15); - __Pyx_GIVEREF(__pyx_tuple__15); - - /* "fontTools/feaLib/lexer.py":43 - * MODE_FILENAME_ = "FILENAME" - * - * def __init__(self, text, filename): # <<<<<<<<<<<<<< - * self.filename_ = filename - * self.line_ = 1 - */ - __pyx_tuple__21 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_text, __pyx_n_s_filename); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__21); - __Pyx_GIVEREF(__pyx_tuple__21); - __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_init, 43, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 43, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":52 - * self.mode_ = Lexer.MODE_NORMAL_ - * - * def __iter__(self): # <<<<<<<<<<<<<< - * return self - * - */ - __pyx_tuple__23 = PyTuple_Pack(1, __pyx_n_s_self); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__23); - __Pyx_GIVEREF(__pyx_tuple__23); - __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_iter, 52, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 52, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":55 - * return self - * - * def next(self): # Python 2 # <<<<<<<<<<<<<< - * return self.__next__() - * - */ - __pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next_3, 55, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 55, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":58 - * return self.__next__() - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * while True: - * token_type, token, location = self.next_() - */ - __pyx_tuple__26 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_token_type, __pyx_n_s_token, __pyx_n_s_location_2); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 58, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__26); - __Pyx_GIVEREF(__pyx_tuple__26); - __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next, 58, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 58, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":64 - * return (token_type, token, location) - * - * def location_(self): # <<<<<<<<<<<<<< - * column = self.pos_ - self.line_start_ + 1 - * 
return FeatureLibLocation(self.filename_ or "", self.line_, column) - */ - __pyx_tuple__28 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_column); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(0, 64, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__28); - __Pyx_GIVEREF(__pyx_tuple__28); - __pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_location, 64, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(0, 64, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":68 - * return FeatureLibLocation(self.filename_ or "", self.line_, column) - * - * def next_(self): # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_WHITESPACE_) - * location = self.location_() - */ - __pyx_tuple__30 = PyTuple_Pack(10, __pyx_n_s_self, __pyx_n_s_location_2, __pyx_n_s_start, __pyx_n_s_text, __pyx_n_s_limit, __pyx_n_s_cur_char, __pyx_n_s_next_char, __pyx_n_s_glyphclass, __pyx_n_s_token, __pyx_n_s_string); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(0, 68, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__30); - __Pyx_GIVEREF(__pyx_tuple__30); - __pyx_codeobj__31 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__30, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next_2, 68, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__31)) __PYX_ERR(0, 68, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":169 - * raise FeatureLibError("Unexpected character: %r" % cur_char, location) - * - * def scan_over_(self, valid): # <<<<<<<<<<<<<< - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] in valid: - */ - __pyx_tuple__32 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_valid, __pyx_n_s_p); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(0, 169, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__32); - __Pyx_GIVEREF(__pyx_tuple__32); - __pyx_codeobj__33 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__32, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_scan_over, 169, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__33)) __PYX_ERR(0, 169, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":175 - * self.pos_ = p - * - * def scan_until_(self, stop_at): # <<<<<<<<<<<<<< - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] not in stop_at: - */ - __pyx_tuple__34 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_stop_at, __pyx_n_s_p); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(0, 175, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__34); - __Pyx_GIVEREF(__pyx_tuple__34); - __pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_scan_until, 175, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(0, 175, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":181 - * self.pos_ = p - * - * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< - * location = self.location_() - * tag = tag.strip() - */ - __pyx_tuple__36 = PyTuple_Pack(5, __pyx_n_s_self, __pyx_n_s_tag, __pyx_n_s_location_2, __pyx_n_s_regexp, __pyx_n_s_split); if (unlikely(!__pyx_tuple__36)) __PYX_ERR(0, 181, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__36); - __Pyx_GIVEREF(__pyx_tuple__36); - __pyx_codeobj__37 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__36, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_scan_anonymous_block, 181, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__37)) __PYX_ERR(0, 181, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":196 - * - * - * class IncludingLexer(object): # <<<<<<<<<<<<<< - * """A Lexer that follows include statements. - * - */ - __pyx_tuple__38 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__38)) __PYX_ERR(0, 196, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__38); - __Pyx_GIVEREF(__pyx_tuple__38); - __pyx_tuple__39 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(0, 196, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__39); - __Pyx_GIVEREF(__pyx_tuple__39); - - /* "fontTools/feaLib/lexer.py":211 - * """ - * - * def __init__(self, featurefile, *, includeDir=None): # <<<<<<<<<<<<<< - * """Initializes an IncludingLexer. - * - */ - __pyx_tuple__40 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_featurefile, __pyx_n_s_includeDir); if (unlikely(!__pyx_tuple__40)) __PYX_ERR(0, 211, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__40); - __Pyx_GIVEREF(__pyx_tuple__40); - __pyx_codeobj__41 = (PyObject*)__Pyx_PyCode_New(2, 0, 1, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__40, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_init, 211, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__41)) __PYX_ERR(0, 211, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":225 - * self.includeDir = includeDir - * - * def __iter__(self): # <<<<<<<<<<<<<< - * return self - * - */ - __pyx_codeobj__42 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_iter, 225, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__42)) __PYX_ERR(0, 225, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":228 - * return self - * - * def next(self): # Python 2 # <<<<<<<<<<<<<< - * return self.__next__() - * - */ - __pyx_codeobj__43 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next_3, 228, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__43)) __PYX_ERR(0, 228, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":231 - * return self.__next__() - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * while self.lexers_: - * lexer = self.lexers_[-1] - */ - __pyx_tuple__44 = PyTuple_Pack(11, __pyx_n_s_self, __pyx_n_s_lexer, __pyx_n_s_token_type, __pyx_n_s_token, __pyx_n_s_location_2, __pyx_n_s_fname_type, __pyx_n_s_fname_token, __pyx_n_s_fname_location, __pyx_n_s_path, __pyx_n_s_curpath, __pyx_n_s_err); if (unlikely(!__pyx_tuple__44)) __PYX_ERR(0, 231, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__44); - __Pyx_GIVEREF(__pyx_tuple__44); - __pyx_codeobj__45 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__44, __pyx_empty_tuple, __pyx_empty_tuple, 
__pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next, 231, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__45)) __PYX_ERR(0, 231, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":270 - * raise StopIteration() - * - * @staticmethod # <<<<<<<<<<<<<< - * def make_lexer_(file_or_path): - * if hasattr(file_or_path, "read"): - */ - __pyx_tuple__46 = PyTuple_Pack(5, __pyx_n_s_file_or_path, __pyx_n_s_fileobj, __pyx_n_s_closing, __pyx_n_s_filename, __pyx_n_s_data); if (unlikely(!__pyx_tuple__46)) __PYX_ERR(0, 270, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__46); - __Pyx_GIVEREF(__pyx_tuple__46); - __pyx_codeobj__47 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__46, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_make_lexer, 270, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__47)) __PYX_ERR(0, 270, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":283 - * return Lexer(data, filename) - * - * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< - * return self.lexers_[-1].scan_anonymous_block(tag) - * - */ - __pyx_tuple__48 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_tag); if (unlikely(!__pyx_tuple__48)) __PYX_ERR(0, 283, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__48); - __Pyx_GIVEREF(__pyx_tuple__48); - __pyx_codeobj__49 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__48, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_scan_anonymous_block, 283, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__49)) __PYX_ERR(0, 283, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":290 - * """Lexer that does not follow `include` statements, emits them as-is.""" - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * return next(self.lexers_[0]) - */ - __pyx_codeobj__50 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Lib_fontTools_feaLib_lexer_py, __pyx_n_s_next, 290, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__50)) __PYX_ERR(0, 290, __pyx_L1_error) - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} -/* #### Code section: init_constants ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitConstants(void) { - __pyx_umethod_PyList_Type_pop.type = (PyObject*)&PyList_Type; - __pyx_umethod_PyList_Type_pop.method_name = &__pyx_n_s_pop; - if (__Pyx_CreateStringTabAndInitStrings() < 0) __PYX_ERR(0, 1, __pyx_L1_error); - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_8 = PyInt_FromLong(8); if (unlikely(!__pyx_int_8)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_10 = PyInt_FromLong(10); if (unlikely(!__pyx_int_10)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_16 = PyInt_FromLong(16); if (unlikely(!__pyx_int_16)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} -/* #### Code section: init_globals ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - return 0; -} -/* #### Code section: init_module ### */ - -static CYTHON_SMALL_CODE int 
__Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_lexer(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_lexer}, - {0, NULL} -}; -#endif - -#ifdef __cplusplus -namespace { - struct PyModuleDef __pyx_moduledef = - #else - static struct PyModuleDef __pyx_moduledef = - #endif - { - PyModuleDef_HEAD_INIT, - "lexer", - 0, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #elif CYTHON_USE_MODULE_STATE - sizeof(__pyx_mstate), /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - #if CYTHON_USE_MODULE_STATE - __pyx_m_traverse, /* m_traverse */ - __pyx_m_clear, /* m_clear */ - NULL /* m_free */ - #else - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ - #endif - }; - #ifdef __cplusplus -} /* anonymous namespace */ -#endif -#endif - -#ifndef CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif 
-#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initlexer(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initlexer(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_lexer(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_lexer(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? -1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *module, const char* from_name, const char* to_name, int allow_none) -#else -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) -#endif -{ - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { -#if CYTHON_COMPILING_IN_LIMITED_API - result = PyModule_AddObject(module, to_name, value); -#else - result = PyDict_SetItemString(moddict, to_name, value); -#endif - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - CYTHON_UNUSED_VAR(def); - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; -#if CYTHON_COMPILING_IN_LIMITED_API - moddict = module; -#else - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; -#endif - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_lexer(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - int stringtab_initialized = 0; - #if CYTHON_USE_MODULE_STATE - int pystate_addmodule_run = 0; - #endif - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 
= NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - int __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'lexer' has already been imported. Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("lexer", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #elif CYTHON_USE_MODULE_STATE - __pyx_t_1 = PyModule_Create(&__pyx_moduledef); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - { - int add_module_result = PyState_AddModule(__pyx_t_1, &__pyx_moduledef); - __pyx_t_1 = 0; /* transfer ownership from __pyx_t_1 to lexer pseudovariable */ - if (unlikely((add_module_result < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - pystate_addmodule_run = 1; - } - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #endif - CYTHON_UNUSED_VAR(__pyx_t_1); - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_lexer(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_StopAsyncIteration_USED - if 
(__pyx_StopAsyncIteration_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - PyEval_InitThreads(); - #endif - /*--- Initialize various global constants etc. ---*/ - if (__Pyx_InitConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - stringtab_initialized = 1; - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_fontTools__feaLib__lexer) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "fontTools.feaLib.lexer")) { - if (unlikely((PyDict_SetItemString(modules, "fontTools.feaLib.lexer", __pyx_m) < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - (void)__Pyx_modinit_type_init_code(); - (void)__Pyx_modinit_type_import_code(); - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "fontTools/feaLib/lexer.py":1 - * from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound # <<<<<<<<<<<<<< - * from fontTools.feaLib.location import FeatureLibLocation - * import re - */ - __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_n_s_FeatureLibError); - __Pyx_GIVEREF(__pyx_n_s_FeatureLibError); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_FeatureLibError); - __Pyx_INCREF(__pyx_n_s_IncludedFeaNotFound); - __Pyx_GIVEREF(__pyx_n_s_IncludedFeaNotFound); - PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_IncludedFeaNotFound); - __pyx_t_3 = __Pyx_Import(__pyx_n_s_fontTools_feaLib_error, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_FeatureLibError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_FeatureLibError, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_IncludedFeaNotFound); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_IncludedFeaNotFound, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":2 - * from fontTools.feaLib.error import 
FeatureLibError, IncludedFeaNotFound - * from fontTools.feaLib.location import FeatureLibLocation # <<<<<<<<<<<<<< - * import re - * import os - */ - __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_n_s_FeatureLibLocation); - __Pyx_GIVEREF(__pyx_n_s_FeatureLibLocation); - PyList_SET_ITEM(__pyx_t_3, 0, __pyx_n_s_FeatureLibLocation); - __pyx_t_2 = __Pyx_Import(__pyx_n_s_fontTools_feaLib_location, __pyx_t_3, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_FeatureLibLocation); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_FeatureLibLocation, __pyx_t_3) < 0) __PYX_ERR(0, 2, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":3 - * from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound - * from fontTools.feaLib.location import FeatureLibLocation - * import re # <<<<<<<<<<<<<< - * import os - * - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_re, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_re, __pyx_t_2) < 0) __PYX_ERR(0, 3, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":4 - * from fontTools.feaLib.location import FeatureLibLocation - * import re - * import os # <<<<<<<<<<<<<< - * - * try: - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_os, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_os, __pyx_t_2) < 0) __PYX_ERR(0, 4, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":6 - * import os - * - * try: # <<<<<<<<<<<<<< - * import cython - * except ImportError: - */ - { - (void)__pyx_t_1; (void)__pyx_t_4; (void)__pyx_t_5; /* mark used */ - /*try:*/ { - - /* "fontTools/feaLib/lexer.py":7 - * - * try: - * import cython # <<<<<<<<<<<<<< - * except ImportError: - * # if cython not installed, use mock module with no-op decorators and types - */ - } - } - - /* "fontTools/feaLib/lexer.py":13 - * - * - * class Lexer(object): # <<<<<<<<<<<<<< - * NUMBER = "NUMBER" - * HEXADECIMAL = "HEXADECIMAL" - */ - __pyx_t_2 = __Pyx_PEP560_update_bases(__pyx_tuple__15); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_Py3MetaclassPrepare(__pyx_t_3, __pyx_t_2, __pyx_n_s_Lexer, __pyx_n_s_Lexer, (PyObject *) NULL, __pyx_n_s_fontTools_feaLib_lexer, (PyObject *) NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (__pyx_t_2 != __pyx_tuple__15) { - if (unlikely((PyDict_SetItemString(__pyx_t_6, "__orig_bases__", __pyx_tuple__15) < 0))) __PYX_ERR(0, 13, __pyx_L1_error) - } - - /* "fontTools/feaLib/lexer.py":14 - * - * class Lexer(object): - * NUMBER = "NUMBER" # <<<<<<<<<<<<<< - * HEXADECIMAL = "HEXADECIMAL" - * OCTAL = "OCTAL" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_NUMBER, __pyx_n_u_NUMBER) < 0) __PYX_ERR(0, 14, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":15 - * class Lexer(object): - * NUMBER = "NUMBER" - * 
HEXADECIMAL = "HEXADECIMAL" # <<<<<<<<<<<<<< - * OCTAL = "OCTAL" - * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_HEXADECIMAL, __pyx_n_u_HEXADECIMAL) < 0) __PYX_ERR(0, 15, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":16 - * NUMBER = "NUMBER" - * HEXADECIMAL = "HEXADECIMAL" - * OCTAL = "OCTAL" # <<<<<<<<<<<<<< - * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) - * FLOAT = "FLOAT" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_OCTAL, __pyx_n_u_OCTAL) < 0) __PYX_ERR(0, 16, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":17 - * HEXADECIMAL = "HEXADECIMAL" - * OCTAL = "OCTAL" - * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) # <<<<<<<<<<<<<< - * FLOAT = "FLOAT" - * STRING = "STRING" - */ - __pyx_t_7 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_NUMBER); - if (unlikely(!__pyx_t_7)) { - PyErr_Clear(); - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_NUMBER); - } - if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_HEXADECIMAL); - if (unlikely(!__pyx_t_8)) { - PyErr_Clear(); - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_HEXADECIMAL); - } - if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_9 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_OCTAL); - if (unlikely(!__pyx_t_9)) { - PyErr_Clear(); - __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_OCTAL); - } - if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_10 = PyTuple_New(3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_8); - PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_8); - __Pyx_GIVEREF(__pyx_t_9); - PyTuple_SET_ITEM(__pyx_t_10, 2, __pyx_t_9); - __pyx_t_7 = 0; - __pyx_t_8 = 0; - __pyx_t_9 = 0; - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_NUMBERS, __pyx_t_10) < 0) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":18 - * OCTAL = "OCTAL" - * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) - * FLOAT = "FLOAT" # <<<<<<<<<<<<<< - * STRING = "STRING" - * NAME = "NAME" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_FLOAT, __pyx_n_u_FLOAT) < 0) __PYX_ERR(0, 18, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":19 - * NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) - * FLOAT = "FLOAT" - * STRING = "STRING" # <<<<<<<<<<<<<< - * NAME = "NAME" - * FILENAME = "FILENAME" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_STRING, __pyx_n_u_STRING) < 0) __PYX_ERR(0, 19, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":20 - * FLOAT = "FLOAT" - * STRING = "STRING" - * NAME = "NAME" # <<<<<<<<<<<<<< - * FILENAME = "FILENAME" - * GLYPHCLASS = "GLYPHCLASS" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_NAME, __pyx_n_u_NAME) < 0) __PYX_ERR(0, 20, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":21 - * STRING = "STRING" - * NAME = "NAME" - * FILENAME = "FILENAME" # <<<<<<<<<<<<<< - * GLYPHCLASS = "GLYPHCLASS" - * CID = "CID" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_FILENAME, __pyx_n_u_FILENAME) < 0) __PYX_ERR(0, 21, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":22 - * NAME = "NAME" - * FILENAME = "FILENAME" - * GLYPHCLASS = "GLYPHCLASS" # <<<<<<<<<<<<<< - * CID = "CID" - * SYMBOL = "SYMBOL" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_GLYPHCLASS, __pyx_n_u_GLYPHCLASS) < 0) __PYX_ERR(0, 22, __pyx_L1_error) - - /* 
"fontTools/feaLib/lexer.py":23 - * FILENAME = "FILENAME" - * GLYPHCLASS = "GLYPHCLASS" - * CID = "CID" # <<<<<<<<<<<<<< - * SYMBOL = "SYMBOL" - * COMMENT = "COMMENT" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CID, __pyx_n_u_CID) < 0) __PYX_ERR(0, 23, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":24 - * GLYPHCLASS = "GLYPHCLASS" - * CID = "CID" - * SYMBOL = "SYMBOL" # <<<<<<<<<<<<<< - * COMMENT = "COMMENT" - * NEWLINE = "NEWLINE" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_SYMBOL, __pyx_n_u_SYMBOL) < 0) __PYX_ERR(0, 24, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":25 - * CID = "CID" - * SYMBOL = "SYMBOL" - * COMMENT = "COMMENT" # <<<<<<<<<<<<<< - * NEWLINE = "NEWLINE" - * ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_COMMENT, __pyx_n_u_COMMENT) < 0) __PYX_ERR(0, 25, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":26 - * SYMBOL = "SYMBOL" - * COMMENT = "COMMENT" - * NEWLINE = "NEWLINE" # <<<<<<<<<<<<<< - * ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" - * - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_NEWLINE, __pyx_n_u_NEWLINE) < 0) __PYX_ERR(0, 26, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":27 - * COMMENT = "COMMENT" - * NEWLINE = "NEWLINE" - * ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" # <<<<<<<<<<<<<< - * - * CHAR_WHITESPACE_ = " \t" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_ANONYMOUS_BLOCK, __pyx_n_u_ANONYMOUS_BLOCK) < 0) __PYX_ERR(0, 27, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":29 - * ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" - * - * CHAR_WHITESPACE_ = " \t" # <<<<<<<<<<<<<< - * CHAR_NEWLINE_ = "\r\n" - * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_WHITESPACE, __pyx_kp_u__16) < 0) __PYX_ERR(0, 29, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":30 - * - * CHAR_WHITESPACE_ = " \t" - * CHAR_NEWLINE_ = "\r\n" # <<<<<<<<<<<<<< - * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" - * CHAR_DIGIT_ = "0123456789" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_NEWLINE, __pyx_kp_u__17) < 0) __PYX_ERR(0, 30, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":31 - * CHAR_WHITESPACE_ = " \t" - * CHAR_NEWLINE_ = "\r\n" - * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" # <<<<<<<<<<<<<< - * CHAR_DIGIT_ = "0123456789" - * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_SYMBOL, __pyx_kp_u__18) < 0) __PYX_ERR(0, 31, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":32 - * CHAR_NEWLINE_ = "\r\n" - * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" - * CHAR_DIGIT_ = "0123456789" # <<<<<<<<<<<<<< - * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" - * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_DIGIT, __pyx_kp_u_0123456789) < 0) __PYX_ERR(0, 32, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":33 - * CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" - * CHAR_DIGIT_ = "0123456789" - * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" # <<<<<<<<<<<<<< - * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - * CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_HEXDIGIT, __pyx_kp_u_0123456789ABCDEFabcdef) < 0) __PYX_ERR(0, 33, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":34 - * CHAR_DIGIT_ = "0123456789" - * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" - * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" # <<<<<<<<<<<<<< - * CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" - * CHAR_NAME_CONTINUATION_ = 
CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_LETTER, __pyx_n_u_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef) < 0) __PYX_ERR(0, 34, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":35 - * CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" - * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - * CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" # <<<<<<<<<<<<<< - * CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" - * - */ - __pyx_t_10 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_CHAR_LETTER); - if (unlikely(!__pyx_t_10)) { - PyErr_Clear(); - __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_CHAR_LETTER); - } - if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 35, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __pyx_t_9 = PyNumber_Add(__pyx_t_10, __pyx_kp_u__19); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 35, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_NAME_START, __pyx_t_9) < 0) __PYX_ERR(0, 35, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":36 - * CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - * CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" - * CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" # <<<<<<<<<<<<<< - * - * RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$") - */ - __pyx_t_9 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_CHAR_LETTER); - if (unlikely(!__pyx_t_9)) { - PyErr_Clear(); - __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_CHAR_LETTER); - } - if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_10 = PyObject_GetItem(__pyx_t_6, __pyx_n_s_CHAR_DIGIT); - if (unlikely(!__pyx_t_10)) { - PyErr_Clear(); - __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_CHAR_DIGIT); - } - if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __pyx_t_8 = PyNumber_Add(__pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = PyNumber_Add(__pyx_t_8, __pyx_kp_u__20); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_CHAR_NAME_CONTINUATION, __pyx_t_10) < 0) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":38 - * CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" - * - * RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$") # <<<<<<<<<<<<<< - * - * MODE_NORMAL_ = "NORMAL" - */ - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_re); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_compile); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_9); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_9, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_8, __pyx_kp_u_A_Za_z_0_9}; - __pyx_t_10 = 
__Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+1-__pyx_t_11, 1+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_RE_GLYPHCLASS, __pyx_t_10) < 0) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":40 - * RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$") - * - * MODE_NORMAL_ = "NORMAL" # <<<<<<<<<<<<<< - * MODE_FILENAME_ = "FILENAME" - * - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_MODE_NORMAL, __pyx_n_u_NORMAL) < 0) __PYX_ERR(0, 40, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":41 - * - * MODE_NORMAL_ = "NORMAL" - * MODE_FILENAME_ = "FILENAME" # <<<<<<<<<<<<<< - * - * def __init__(self, text, filename): - */ - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_MODE_FILENAME, __pyx_n_u_FILENAME) < 0) __PYX_ERR(0, 41, __pyx_L1_error) - - /* "fontTools/feaLib/lexer.py":43 - * MODE_FILENAME_ = "FILENAME" - * - * def __init__(self, text, filename): # <<<<<<<<<<<<<< - * self.filename_ = filename - * self.line_ = 1 - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_1__init__, 0, __pyx_n_s_Lexer___init, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__22)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_init, __pyx_t_10) < 0) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":52 - * self.mode_ = Lexer.MODE_NORMAL_ - * - * def __iter__(self): # <<<<<<<<<<<<<< - * return self - * - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_3__iter__, 0, __pyx_n_s_Lexer___iter, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_iter, __pyx_t_10) < 0) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":55 - * return self - * - * def next(self): # Python 2 # <<<<<<<<<<<<<< - * return self.__next__() - * - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_5next, 0, __pyx_n_s_Lexer_next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__25)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_next_3, __pyx_t_10) < 0) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":58 - * return self.__next__() - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * while True: - * token_type, token, location = self.next_() - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_7__next__, 0, __pyx_n_s_Lexer___next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__27)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 58, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_next, __pyx_t_10) < 0) __PYX_ERR(0, 58, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":64 - * return (token_type, token, location) - * - * def location_(self): # <<<<<<<<<<<<<< - * column = 
self.pos_ - self.line_start_ + 1 - * return FeatureLibLocation(self.filename_ or "", self.line_, column) - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_9location_, 0, __pyx_n_s_Lexer_location, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__29)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 64, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_location, __pyx_t_10) < 0) __PYX_ERR(0, 64, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":68 - * return FeatureLibLocation(self.filename_ or "", self.line_, column) - * - * def next_(self): # <<<<<<<<<<<<<< - * self.scan_over_(Lexer.CHAR_WHITESPACE_) - * location = self.location_() - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_11next_, 0, __pyx_n_s_Lexer_next_2, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__31)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 68, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_next_2, __pyx_t_10) < 0) __PYX_ERR(0, 68, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":169 - * raise FeatureLibError("Unexpected character: %r" % cur_char, location) - * - * def scan_over_(self, valid): # <<<<<<<<<<<<<< - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] in valid: - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_13scan_over_, 0, __pyx_n_s_Lexer_scan_over, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__33)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 169, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_scan_over, __pyx_t_10) < 0) __PYX_ERR(0, 169, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":175 - * self.pos_ = p - * - * def scan_until_(self, stop_at): # <<<<<<<<<<<<<< - * p = self.pos_ - * while p < self.text_length_ and self.text_[p] not in stop_at: - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_15scan_until_, 0, __pyx_n_s_Lexer_scan_until, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__35)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 175, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_scan_until, __pyx_t_10) < 0) __PYX_ERR(0, 175, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":181 - * self.pos_ = p - * - * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< - * location = self.location_() - * tag = tag.strip() - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_5Lexer_17scan_anonymous_block, 0, __pyx_n_s_Lexer_scan_anonymous_block, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__37)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_scan_anonymous_block, __pyx_t_10) < 0) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":13 - * - * - * class Lexer(object): # <<<<<<<<<<<<<< - * NUMBER = "NUMBER" - * HEXADECIMAL = "HEXADECIMAL" - */ - __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_3, __pyx_n_s_Lexer, __pyx_t_2, __pyx_t_6, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_10); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_Lexer, __pyx_t_10) < 0) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":196 - * - * - * class IncludingLexer(object): # <<<<<<<<<<<<<< - * """A Lexer that follows include statements. - * - */ - __pyx_t_2 = __Pyx_PEP560_update_bases(__pyx_tuple__39); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 196, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 196, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_Py3MetaclassPrepare(__pyx_t_3, __pyx_t_2, __pyx_n_s_IncludingLexer, __pyx_n_s_IncludingLexer, (PyObject *) NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_kp_s_A_Lexer_that_follows_include_sta); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 196, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (__pyx_t_2 != __pyx_tuple__39) { - if (unlikely((PyDict_SetItemString(__pyx_t_6, "__orig_bases__", __pyx_tuple__39) < 0))) __PYX_ERR(0, 196, __pyx_L1_error) - } - - /* "fontTools/feaLib/lexer.py":211 - * """ - * - * def __init__(self, featurefile, *, includeDir=None): # <<<<<<<<<<<<<< - * """Initializes an IncludingLexer. - * - */ - __pyx_t_10 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 211, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (PyDict_SetItem(__pyx_t_10, __pyx_n_s_includeDir, ((PyObject *)Py_None)) < 0) __PYX_ERR(0, 211, __pyx_L1_error) - __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_1__init__, 0, __pyx_n_s_IncludingLexer___init, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__41)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 211, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_CyFunction_SetDefaultsKwDict(__pyx_t_9, __pyx_t_10); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_init, __pyx_t_9) < 0) __PYX_ERR(0, 211, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":225 - * self.includeDir = includeDir - * - * def __iter__(self): # <<<<<<<<<<<<<< - * return self - * - */ - __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_3__iter__, 0, __pyx_n_s_IncludingLexer___iter, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__42)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 225, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_iter, __pyx_t_9) < 0) __PYX_ERR(0, 225, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":228 - * return self - * - * def next(self): # Python 2 # <<<<<<<<<<<<<< - * return self.__next__() - * - */ - __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_5next, 0, __pyx_n_s_IncludingLexer_next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__43)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_next_3, __pyx_t_9) < 0) __PYX_ERR(0, 228, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":231 - * return self.__next__() - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * while self.lexers_: - * lexer = self.lexers_[-1] - */ - 
__pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_7__next__, 0, __pyx_n_s_IncludingLexer___next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__45)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 231, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_next, __pyx_t_9) < 0) __PYX_ERR(0, 231, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "fontTools/feaLib/lexer.py":270 - * raise StopIteration() - * - * @staticmethod # <<<<<<<<<<<<<< - * def make_lexer_(file_or_path): - * if hasattr(file_or_path, "read"): - */ - __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_9make_lexer_, __Pyx_CYFUNCTION_STATICMETHOD, __pyx_n_s_IncludingLexer_make_lexer, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__47)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 270, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_staticmethod, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 270, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_make_lexer, __pyx_t_10) < 0) __PYX_ERR(0, 270, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":283 - * return Lexer(data, filename) - * - * def scan_anonymous_block(self, tag): # <<<<<<<<<<<<<< - * return self.lexers_[-1].scan_anonymous_block(tag) - * - */ - __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_14IncludingLexer_11scan_anonymous_block, 0, __pyx_n_s_IncludingLexer_scan_anonymous_bl, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__49)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 283, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__Pyx_SetNameInClass(__pyx_t_6, __pyx_n_s_scan_anonymous_block, __pyx_t_10) < 0) __PYX_ERR(0, 283, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "fontTools/feaLib/lexer.py":196 - * - * - * class IncludingLexer(object): # <<<<<<<<<<<<<< - * """A Lexer that follows include statements. 
- * - */ - __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_3, __pyx_n_s_IncludingLexer, __pyx_t_2, __pyx_t_6, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 196, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_IncludingLexer, __pyx_t_10) < 0) __PYX_ERR(0, 196, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":287 - * - * - * class NonIncludingLexer(IncludingLexer): # <<<<<<<<<<<<<< - * """Lexer that does not follow `include` statements, emits them as-is.""" - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_IncludingLexer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PEP560_update_bases(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_10 = __Pyx_Py3MetaclassPrepare(__pyx_t_6, __pyx_t_2, __pyx_n_s_NonIncludingLexer, __pyx_n_s_NonIncludingLexer, (PyObject *) NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_kp_s_Lexer_that_does_not_follow_inclu); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - if (__pyx_t_2 != __pyx_t_3) { - if (unlikely((PyDict_SetItemString(__pyx_t_10, "__orig_bases__", __pyx_t_3) < 0))) __PYX_ERR(0, 287, __pyx_L1_error) - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":290 - * """Lexer that does not follow `include` statements, emits them as-is.""" - * - * def __next__(self): # Python 3 # <<<<<<<<<<<<<< - * return next(self.lexers_[0]) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_6feaLib_5lexer_17NonIncludingLexer_1__next__, 0, __pyx_n_s_NonIncludingLexer___next, NULL, __pyx_n_s_fontTools_feaLib_lexer, __pyx_d, ((PyObject *)__pyx_codeobj__50)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 290, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (__Pyx_SetNameInClass(__pyx_t_10, __pyx_n_s_next, __pyx_t_3) < 0) __PYX_ERR(0, 290, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "fontTools/feaLib/lexer.py":287 - * - * - * class NonIncludingLexer(IncludingLexer): # <<<<<<<<<<<<<< - * """Lexer that does not follow `include` statements, emits them as-is.""" - * - */ - __pyx_t_3 = __Pyx_Py3ClassCreate(__pyx_t_6, __pyx_n_s_NonIncludingLexer, __pyx_t_2, __pyx_t_10, NULL, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_NonIncludingLexer, __pyx_t_3) < 0) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "fontTools/feaLib/lexer.py":1 - * from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound # <<<<<<<<<<<<<< - * from fontTools.feaLib.location import FeatureLibLocation - * import re - */ - __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, 
__pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_10); - if (__pyx_m) { - if (__pyx_d && stringtab_initialized) { - __Pyx_AddTraceback("init fontTools.feaLib.lexer", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - #if !CYTHON_USE_MODULE_STATE - Py_CLEAR(__pyx_m); - #else - Py_DECREF(__pyx_m); - if (pystate_addmodule_run) { - PyObject *tp, *value, *tb; - PyErr_Fetch(&tp, &value, &tb); - PyState_RemoveModule(&__pyx_moduledef); - PyErr_Restore(tp, value, tb); - } - #endif - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init fontTools.feaLib.lexer"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} -/* #### Code section: cleanup_globals ### */ -/* #### Code section: cleanup_module ### */ -/* #### Code section: main_method ### */ -/* #### Code section: utility_code_pragmas ### */ -#ifdef _MSC_VER -#pragma warning( push ) -/* Warning 4127: conditional expression is constant - * Cython uses constant conditional expressions to allow in inline functions to be optimized at - * compile-time, so this warning is not useful - */ -#pragma warning( disable : 4127 ) -#endif - - - -/* #### Code section: utility_code_def ### */ - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* PyErrExceptionMatches */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { - int result; - PyObject *exc_type; -#if PY_VERSION_HEX >= 0x030C00A6 - PyObject *current_exception = tstate->current_exception; - if (unlikely(!current_exception)) return 0; - exc_type = (PyObject*) Py_TYPE(current_exception); - if (exc_type == err) return 1; -#else - exc_type = tstate->curexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; -#endif - #if CYTHON_AVOID_BORROWED_REFS - Py_INCREF(exc_type); - #endif - if (unlikely(PyTuple_Check(err))) { - result = __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - } else { - result = __Pyx_PyErr_GivenExceptionMatches(exc_type, err); - } - #if CYTHON_AVOID_BORROWED_REFS - Py_DECREF(exc_type); - #endif - return result; -} -#endif - -/* PyErrFetchRestore */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { -#if PY_VERSION_HEX >= 0x030C00A6 - PyObject *tmp_value; - assert(type == NULL || (value != NULL && type == (PyObject*) Py_TYPE(value))); - if (value) { - #if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(((PyBaseExceptionObject*) value)->traceback != tb)) - #endif - PyException_SetTraceback(value, tb); - } - tmp_value = tstate->current_exception; - tstate->current_exception = value; - Py_XDECREF(tmp_value); -#else - 
PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#endif -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { -#if PY_VERSION_HEX >= 0x030C00A6 - PyObject* exc_value; - exc_value = tstate->current_exception; - tstate->current_exception = 0; - *value = exc_value; - *type = NULL; - *tb = NULL; - if (exc_value) { - *type = (PyObject*) Py_TYPE(exc_value); - Py_INCREF(*type); - #if CYTHON_COMPILING_IN_CPYTHON - *tb = ((PyBaseExceptionObject*) exc_value)->traceback; - Py_XINCREF(*tb); - #else - *tb = PyException_GetTraceback(exc_value); - #endif - } -#else - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -#endif -} -#endif - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* PyObjectGetAttrStrNoError */ -static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - __Pyx_PyErr_Clear(); -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { - PyObject *result; -#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { - return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); - } -#endif - result = __Pyx_PyObject_GetAttrStr(obj, attr_name); - if (unlikely(!result)) { - __Pyx_PyObject_GetAttrStr_ClearAttributeError(); - } - return result; -} - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStrNoError(__pyx_b, name); - if (unlikely(!result) && !PyErr_Occurred()) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* TupleAndListFromArray */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE void __Pyx_copy_object_array(PyObject *const *CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) { - PyObject *v; - Py_ssize_t i; - for (i = 0; i < length; i++) { - v = dest[i] = src[i]; - Py_INCREF(v); - } -} -static CYTHON_INLINE PyObject * -__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n) -{ - PyObject *res; - if (n <= 0) { - Py_INCREF(__pyx_empty_tuple); - return __pyx_empty_tuple; - } - res = PyTuple_New(n); - if (unlikely(res == NULL)) return NULL; - __Pyx_copy_object_array(src, ((PyTupleObject*)res)->ob_item, n); - return res; -} -static CYTHON_INLINE PyObject * -__Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n) -{ - PyObject *res; - if (n <= 0) { - return PyList_New(0); - } - res 
= PyList_New(n); - if (unlikely(res == NULL)) return NULL; - __Pyx_copy_object_array(src, ((PyListObject*)res)->ob_item, n); - return res; -} -#endif - -/* BytesEquals */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - const char *ps1, *ps2; - Py_ssize_t length = PyBytes_GET_SIZE(s1); - if (length != PyBytes_GET_SIZE(s2)) - return (equals == Py_NE); - ps1 = PyBytes_AS_STRING(s1); - ps2 = PyBytes_AS_STRING(s2); - if (ps1[0] != ps2[0]) { - return (equals == Py_NE); - } else if (length == 1) { - return (equals == Py_EQ); - } else { - int result; -#if CYTHON_USE_UNICODE_INTERNALS && (PY_VERSION_HEX < 0x030B0000) - Py_hash_t hash1, hash2; - hash1 = ((PyBytesObject*)s1)->ob_shash; - hash2 = ((PyBytesObject*)s2)->ob_shash; - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - return (equals == Py_NE); - } -#endif - result = memcmp(ps1, ps2, (size_t)length); - return (equals == Py_EQ) ? (result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - -/* UnicodeEquals */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API - return PyObject_RichCompareBool(s1, s2, equals); -#else -#if PY_MAJOR_VERSION < 3 - PyObject* owned_ref = NULL; -#endif - int s1_is_unicode, s2_is_unicode; - if (s1 == s2) { - goto return_eq; - } - s1_is_unicode = PyUnicode_CheckExact(s1); - s2_is_unicode = PyUnicode_CheckExact(s2); -#if PY_MAJOR_VERSION < 3 - if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { - owned_ref = PyUnicode_FromObject(s2); - if (unlikely(!owned_ref)) - return -1; - s2 = owned_ref; - s2_is_unicode = 1; - } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { - owned_ref = PyUnicode_FromObject(s1); - if (unlikely(!owned_ref)) - return -1; - s1 = owned_ref; - s1_is_unicode = 1; - } else if (((!s2_is_unicode) & (!s1_is_unicode))) { - return __Pyx_PyBytes_Equals(s1, s2, equals); - } -#endif - if (s1_is_unicode & s2_is_unicode) { - Py_ssize_t length; - int kind; - void *data1, *data2; - if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) - return -1; - length = __Pyx_PyUnicode_GET_LENGTH(s1); - if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { - goto return_ne; - } -#if CYTHON_USE_UNICODE_INTERNALS - { - Py_hash_t hash1, hash2; - #if CYTHON_PEP393_ENABLED - hash1 = ((PyASCIIObject*)s1)->hash; - hash2 = ((PyASCIIObject*)s2)->hash; - #else - hash1 = ((PyUnicodeObject*)s1)->hash; - hash2 = ((PyUnicodeObject*)s2)->hash; - #endif - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - goto return_ne; - } - } -#endif - kind = __Pyx_PyUnicode_KIND(s1); - if (kind != __Pyx_PyUnicode_KIND(s2)) { - goto return_ne; - } - data1 = __Pyx_PyUnicode_DATA(s1); - data2 = __Pyx_PyUnicode_DATA(s2); - if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { - goto return_ne; - } else if (length == 1) 
{ - goto return_eq; - } else { - int result = memcmp(data1, data2, (size_t)(length * kind)); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ) ? (result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & s2_is_unicode) { - goto return_ne; - } else if ((s2 == Py_None) & s1_is_unicode) { - goto return_ne; - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -return_eq: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ); -return_ne: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_NE); -#endif -} - -/* fastcall */ -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s) -{ - Py_ssize_t i, n = PyTuple_GET_SIZE(kwnames); - for (i = 0; i < n; i++) - { - if (s == PyTuple_GET_ITEM(kwnames, i)) return kwvalues[i]; - } - for (i = 0; i < n; i++) - { - int eq = __Pyx_PyUnicode_Equals(s, PyTuple_GET_ITEM(kwnames, i), Py_EQ); - if (unlikely(eq != 0)) { - if (unlikely(eq < 0)) return NULL; // error - return kwvalues[i]; - } - } - return NULL; // not found (no exception set) -} -#endif - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? 
"" : "s", num_found); -} - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject *const *kwvalues, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - int kwds_is_tuple = CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds)); - while (1) { - if (kwds_is_tuple) { - if (pos >= PyTuple_GET_SIZE(kwds)) break; - key = PyTuple_GET_ITEM(kwds, pos); - value = kwvalues[pos]; - pos++; - } - else - { - if (!PyDict_Next(kwds, &pos, &key, &value)) break; - } - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = ( - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**name, key) - ); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - #if PY_MAJOR_VERSION < 3 - PyErr_Format(PyExc_TypeError, - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - PyErr_Format(PyExc_TypeError, - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* PyObjectSetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_setattro)) - return tp->tp_setattro(obj, attr_name, value); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_setattr)) - return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); -#endif - return PyObject_SetAttr(obj, attr_name, value); -} -#endif - -/* PyDictVersioning */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#elif CYTHON_COMPILING_IN_LIMITED_API - if (unlikely(!__pyx_m)) { - return NULL; - } - result = PyObject_GetAttr(__pyx_m, name); - if (likely(result)) { - return result; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* PyFunctionFastCall */ -#if CYTHON_FAST_PYCALL && !CYTHON_VECTORCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? 
PyDict_Size(kwargs) : 0; - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif - -/* PyObjectCall */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = Py_TYPE(func)->tp_call; - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectCallMethO */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectFastCall */ -static PyObject* __Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs) { - PyObject *argstuple; - PyObject *result; - size_t i; - argstuple = PyTuple_New((Py_ssize_t)nargs); - if (unlikely(!argstuple)) return NULL; - for (i = 0; i < nargs; i++) { - Py_INCREF(args[i]); - PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]); - } - result = __Pyx_PyObject_Call(func, argstuple, kwargs); - Py_DECREF(argstuple); - return result; -} -static CYTHON_INLINE PyObject* 
__Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t _nargs, PyObject *kwargs) { - Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs); -#if CYTHON_COMPILING_IN_CPYTHON - if (nargs == 0 && kwargs == NULL) { -#if defined(__Pyx_CyFunction_USED) && defined(NDEBUG) - if (__Pyx_IsCyOrPyCFunction(func)) -#else - if (PyCFunction_Check(func)) -#endif - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { - return __Pyx_PyObject_CallMethO(func, NULL); - } - } - } - else if (nargs == 1 && kwargs == NULL) { - if (PyCFunction_Check(func)) - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, args[0]); - } - } - } -#endif - #if PY_VERSION_HEX < 0x030800B1 - #if CYTHON_FAST_PYCCALL - if (PyCFunction_Check(func)) { - if (kwargs) { - return _PyCFunction_FastCallDict(func, args, nargs, kwargs); - } else { - return _PyCFunction_FastCallKeywords(func, args, nargs, NULL); - } - } - #if PY_VERSION_HEX >= 0x030700A1 - if (!kwargs && __Pyx_IS_TYPE(func, &PyMethodDescr_Type)) { - return _PyMethodDescr_FastCallKeywords(func, args, nargs, NULL); - } - #endif - #endif - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs); - } - #endif - #endif - #if CYTHON_VECTORCALL - vectorcallfunc f = _PyVectorcall_Function(func); - if (f) { - return f(func, args, (size_t)nargs, kwargs); - } - #elif defined(__Pyx_CyFunction_USED) && CYTHON_BACKPORT_VECTORCALL - if (__Pyx_CyFunction_CheckExact(func)) { - __pyx_vectorcallfunc f = __Pyx_CyFunction_func_vectorcall(func); - if (f) return f(func, args, (size_t)nargs, kwargs); - } - #endif - if (nargs == 0) { - return __Pyx_PyObject_Call(func, __pyx_empty_tuple, kwargs); - } - return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs); -} - -/* RaiseTooManyValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); -} - -/* RaiseNeedMoreValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -/* IterFinish */ -static CYTHON_INLINE int __Pyx_IterFinish(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - PyObject* exc_type = __Pyx_PyErr_CurrentExceptionType(); - if (unlikely(exc_type)) { - if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) - return -1; - __Pyx_PyErr_Clear(); - return 0; - } - return 0; -} - -/* UnpackItemEndCheck */ -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { - if (unlikely(retval)) { - Py_DECREF(retval); - __Pyx_RaiseTooManyValuesError(expected); - return -1; - } - return __Pyx_IterFinish(); -} - -/* PyIntBinop */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - - x = (long)((unsigned long)a + (unsigned long)b); - if (likely((x^a) >= 0 || (x^b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - if (unlikely(__Pyx_PyLong_IsZero(op1))) { - return __Pyx_NewRef(op2); - } - if (likely(__Pyx_PyLong_IsCompact(op1))) { - a = __Pyx_PyLong_CompactValue(op1); - } else { - const digit* digits = __Pyx_PyLong_Digits(op1); - const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(op1); - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - 
goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - } - x = a + b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla + llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double a = __pyx_PyFloat_AsDouble(op1); -#else - double a = PyFloat_AS_DOUBLE(op1); -#endif - double result; - - PyFPE_START_PROTECT("add", return NULL) - result = ((double)a) + (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); -} -#endif - -/* PyObjectCallNoArg */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { - PyObject *arg = NULL; - return __Pyx_PyObject_FastCall(func, (&arg)+1, 0 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); -} - -/* RaiseException */ -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - __Pyx_PyThreadState_declare - CYTHON_UNUSED_VAR(cause); - Py_XINCREF(type); - if (!value || value == Py_None) - value = NULL; - else - Py_INCREF(value); - if (!tb || tb == Py_None) - tb = NULL; - else { - Py_INCREF(tb); - if (!PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - } - if (PyType_Check(type)) { -#if CYTHON_COMPILING_IN_PYPY - if (!value) { - Py_INCREF(Py_None); - value = Py_None; - } -#endif - PyErr_NormalizeException(&type, &value, &tb); - } else { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - value = type; - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - } - __Pyx_PyThreadState_assign - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} -#else -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - PyObject* owned_instance = NULL; - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (PyExceptionClass_Check(type)) { - PyObject *instance_class = NULL; - if (value && PyExceptionInstance_Check(value)) { - instance_class = (PyObject*) Py_TYPE(value); - if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - goto bad; - } else { - type = instance_class; - } - } - } - if (!instance_class) { - PyObject *args; - if (!value) - args = PyTuple_New(0); - else if (PyTuple_Check(value)) { - Py_INCREF(value); - args = value; - } else - args = PyTuple_Pack(1, value); - if (!args) - goto bad; - owned_instance = PyObject_Call(type, args, NULL); - Py_DECREF(args); - if (!owned_instance) - goto bad; - value = owned_instance; - if (!PyExceptionInstance_Check(value)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %R", - type, Py_TYPE(value)); - goto bad; - } - } - } else { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - if (cause) { - PyObject *fixed_cause; - if (cause == Py_None) { - fixed_cause = NULL; - } else if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } else { - 
PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - PyException_SetCause(value, fixed_cause); - } - PyErr_SetObject(type, value); - if (tb) { - #if PY_VERSION_HEX >= 0x030C00A6 - PyException_SetTraceback(value, tb); - #elif CYTHON_FAST_THREAD_STATE - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } -#else - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#endif - } -bad: - Py_XDECREF(owned_instance); - return; -} -#endif - -/* GetItemInt */ -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (unlikely(!j)) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; - PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; - if (mm && mm->mp_subscript) { - PyObject *r, *key = PyInt_FromSsize_t(i); - if (unlikely(!key)) return NULL; - r = mm->mp_subscript(o, key); - Py_DECREF(key); - return r; - } - if (likely(sm && sm->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { - Py_ssize_t l = sm->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return sm->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* PyObjectCallOneArg */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *args[2] = {NULL, arg}; - return __Pyx_PyObject_FastCall(func, args+1, 1 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); -} - -/* ObjectGetItem */ -#if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject *index) { - PyObject *runerr = NULL; - Py_ssize_t key_value; - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - __Pyx_TypeName index_type_name = __Pyx_PyType_GetName(Py_TYPE(index)); - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, - "cannot fit '" __Pyx_FMT_TYPENAME "' into an index-sized integer", index_type_name); - __Pyx_DECREF_TypeName(index_type_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem_Slow(PyObject *obj, PyObject *key) { - __Pyx_TypeName obj_type_name; - if (likely(PyType_Check(obj))) { - PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(obj, __pyx_n_s_class_getitem); - if (meth) { - PyObject *result = __Pyx_PyObject_CallOneArg(meth, key); - Py_DECREF(meth); - return result; - } - } - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, - "'" __Pyx_FMT_TYPENAME "' object is not subscriptable", obj_type_name); - __Pyx_DECREF_TypeName(obj_type_name); - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key) { - PyTypeObject *tp = Py_TYPE(obj); - PyMappingMethods *mm = tp->tp_as_mapping; - PySequenceMethods *sm = tp->tp_as_sequence; - if (likely(mm && mm->mp_subscript)) { - return mm->mp_subscript(obj, key); - } - if (likely(sm && sm->sq_item)) { - return __Pyx_PyObject_GetIndex(obj, key); - } - return __Pyx_PyObject_GetItem_Slow(obj, key); -} -#endif - -/* SliceObject */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, - Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, - int has_cstart, int has_cstop, int wraparound) { - __Pyx_TypeName obj_type_name; -#if CYTHON_USE_TYPE_SLOTS - PyMappingMethods* mp; -#if PY_MAJOR_VERSION < 3 - PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; - if (likely(ms && ms->sq_slice)) { - if (!has_cstart) { - if (_py_start && (*_py_start != Py_None)) { - cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); - if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstart = 0; - } - if (!has_cstop) { - if (_py_stop && 
(*_py_stop != Py_None)) { - cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); - if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstop = PY_SSIZE_T_MAX; - } - if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { - Py_ssize_t l = ms->sq_length(obj); - if (likely(l >= 0)) { - if (cstop < 0) { - cstop += l; - if (cstop < 0) cstop = 0; - } - if (cstart < 0) { - cstart += l; - if (cstart < 0) cstart = 0; - } - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - goto bad; - PyErr_Clear(); - } - } - return ms->sq_slice(obj, cstart, cstop); - } -#else - CYTHON_UNUSED_VAR(wraparound); -#endif - mp = Py_TYPE(obj)->tp_as_mapping; - if (likely(mp && mp->mp_subscript)) -#else - CYTHON_UNUSED_VAR(wraparound); -#endif - { - PyObject* result; - PyObject *py_slice, *py_start, *py_stop; - if (_py_slice) { - py_slice = *_py_slice; - } else { - PyObject* owned_start = NULL; - PyObject* owned_stop = NULL; - if (_py_start) { - py_start = *_py_start; - } else { - if (has_cstart) { - owned_start = py_start = PyInt_FromSsize_t(cstart); - if (unlikely(!py_start)) goto bad; - } else - py_start = Py_None; - } - if (_py_stop) { - py_stop = *_py_stop; - } else { - if (has_cstop) { - owned_stop = py_stop = PyInt_FromSsize_t(cstop); - if (unlikely(!py_stop)) { - Py_XDECREF(owned_start); - goto bad; - } - } else - py_stop = Py_None; - } - py_slice = PySlice_New(py_start, py_stop, Py_None); - Py_XDECREF(owned_start); - Py_XDECREF(owned_stop); - if (unlikely(!py_slice)) goto bad; - } -#if CYTHON_USE_TYPE_SLOTS - result = mp->mp_subscript(obj, py_slice); -#else - result = PyObject_GetItem(obj, py_slice); -#endif - if (!_py_slice) { - Py_DECREF(py_slice); - } - return result; - } - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, - "'" __Pyx_FMT_TYPENAME "' object is unsliceable", obj_type_name); - __Pyx_DECREF_TypeName(obj_type_name); -bad: - return NULL; -} - -/* PyIntBinop */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - - x = (long)((unsigned long)a - (unsigned long)b); - if (likely((x^a) >= 0 || (x^~b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_subtract(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - if (unlikely(__Pyx_PyLong_IsZero(op1))) { - return PyLong_FromLong(-intval); - } - if (likely(__Pyx_PyLong_IsCompact(op1))) { - a = __Pyx_PyLong_CompactValue(op1); - } else { - const digit* digits = __Pyx_PyLong_Digits(op1); - const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(op1); - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = 
(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); - } - } - x = a - b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla - llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double a = __pyx_PyFloat_AsDouble(op1); -#else - double a = PyFloat_AS_DOUBLE(op1); -#endif - double result; - - PyFPE_START_PROTECT("subtract", return NULL) - result = ((double)a) - (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? 
PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); -} -#endif - -/* pybytes_as_double */ -static double __Pyx_SlowPyString_AsDouble(PyObject *obj) { - PyObject *float_value; -#if PY_MAJOR_VERSION >= 3 - float_value = PyFloat_FromString(obj); -#else - float_value = PyFloat_FromString(obj, 0); -#endif - if (likely(float_value)) { - double value = PyFloat_AS_DOUBLE(float_value); - Py_DECREF(float_value); - return value; - } - return (double)-1; -} -static const char* __Pyx__PyBytes_AsDouble_Copy(const char* start, char* buffer, Py_ssize_t length) { - int last_was_punctuation = 1; - Py_ssize_t i; - for (i=0; i < length; i++) { - char chr = start[i]; - int is_punctuation = (chr == '_') | (chr == '.') | (chr == 'e') | (chr == 'E'); - *buffer = chr; - buffer += (chr != '_'); - if (unlikely(last_was_punctuation & is_punctuation)) goto parse_failure; - last_was_punctuation = is_punctuation; - } - if (unlikely(last_was_punctuation)) goto parse_failure; - *buffer = '\0'; - return buffer; -parse_failure: - return NULL; -} -static double __Pyx__PyBytes_AsDouble_inf_nan(const char* start, Py_ssize_t length) { - int matches = 1; - char sign = start[0]; - int is_signed = (sign == '+') | (sign == '-'); - start += is_signed; - length -= is_signed; - switch (start[0]) { - #ifdef Py_NAN - case 'n': - case 'N': - if (unlikely(length != 3)) goto parse_failure; - matches &= (start[1] == 'a' || start[1] == 'A'); - matches &= (start[2] == 'n' || start[2] == 'N'); - if (unlikely(!matches)) goto parse_failure; - return (sign == '-') ? -Py_NAN : Py_NAN; - #endif - case 'i': - case 'I': - if (unlikely(length < 3)) goto parse_failure; - matches &= (start[1] == 'n' || start[1] == 'N'); - matches &= (start[2] == 'f' || start[2] == 'F'); - if (likely(length == 3 && matches)) - return (sign == '-') ? -Py_HUGE_VAL : Py_HUGE_VAL; - if (unlikely(length != 8)) goto parse_failure; - matches &= (start[3] == 'i' || start[3] == 'I'); - matches &= (start[4] == 'n' || start[4] == 'N'); - matches &= (start[5] == 'i' || start[5] == 'I'); - matches &= (start[6] == 't' || start[6] == 'T'); - matches &= (start[7] == 'y' || start[7] == 'Y'); - if (unlikely(!matches)) goto parse_failure; - return (sign == '-') ? 
-Py_HUGE_VAL : Py_HUGE_VAL; - case '.': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': - break; - default: - goto parse_failure; - } - return 0.0; -parse_failure: - return -1.0; -} -static CYTHON_INLINE int __Pyx__PyBytes_AsDouble_IsSpace(char ch) { - return (ch == 0x20) | !((ch < 0x9) | (ch > 0xd)); -} -CYTHON_UNUSED static double __Pyx__PyBytes_AsDouble(PyObject *obj, const char* start, Py_ssize_t length) { - double value; - Py_ssize_t i, digits; - const char *last = start + length; - char *end; - while (__Pyx__PyBytes_AsDouble_IsSpace(*start)) - start++; - while (start < last - 1 && __Pyx__PyBytes_AsDouble_IsSpace(last[-1])) - last--; - length = last - start; - if (unlikely(length <= 0)) goto fallback; - value = __Pyx__PyBytes_AsDouble_inf_nan(start, length); - if (unlikely(value == -1.0)) goto fallback; - if (value != 0.0) return value; - digits = 0; - for (i=0; i < length; digits += start[i++] != '_'); - if (likely(digits == length)) { - value = PyOS_string_to_double(start, &end, NULL); - } else if (digits < 40) { - char number[40]; - last = __Pyx__PyBytes_AsDouble_Copy(start, number, length); - if (unlikely(!last)) goto fallback; - value = PyOS_string_to_double(number, &end, NULL); - } else { - char *number = (char*) PyMem_Malloc((digits + 1) * sizeof(char)); - if (unlikely(!number)) goto fallback; - last = __Pyx__PyBytes_AsDouble_Copy(start, number, length); - if (unlikely(!last)) { - PyMem_Free(number); - goto fallback; - } - value = PyOS_string_to_double(number, &end, NULL); - PyMem_Free(number); - } - if (likely(end == last) || (value == (double)-1 && PyErr_Occurred())) { - return value; - } -fallback: - return __Pyx_SlowPyString_AsDouble(obj); -} - -/* pynumber_float */ -static CYTHON_INLINE PyObject* __Pyx__PyNumber_Float(PyObject* obj) { - double val; - if (PyLong_CheckExact(obj)) { -#if CYTHON_USE_PYLONG_INTERNALS - if (likely(__Pyx_PyLong_IsCompact(obj))) { - val = (double) __Pyx_PyLong_CompactValue(obj); - goto no_error; - } -#endif - val = PyLong_AsDouble(obj); - } else if (PyUnicode_CheckExact(obj)) { - val = __Pyx_PyUnicode_AsDouble(obj); - } else if (PyBytes_CheckExact(obj)) { - val = __Pyx_PyBytes_AsDouble(obj); - } else if (PyByteArray_CheckExact(obj)) { - val = __Pyx_PyByteArray_AsDouble(obj); - } else { - return PyNumber_Float(obj); - } - if (unlikely(val == -1 && PyErr_Occurred())) { - return NULL; - } -#if CYTHON_USE_PYLONG_INTERNALS -no_error: -#endif - return PyFloat_FromDouble(val); -} - -/* IterNext */ -static PyObject *__Pyx_PyIter_Next2Default(PyObject* defval) { - PyObject* exc_type; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - exc_type = __Pyx_PyErr_CurrentExceptionType(); - if (unlikely(exc_type)) { - if (!defval || unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) - return NULL; - __Pyx_PyErr_Clear(); - Py_INCREF(defval); - return defval; - } - if (defval) { - Py_INCREF(defval); - return defval; - } - __Pyx_PyErr_SetNone(PyExc_StopIteration); - return NULL; -} -static void __Pyx_PyIter_Next_ErrorNoIterator(PyObject *iterator) { - __Pyx_TypeName iterator_type_name = __Pyx_PyType_GetName(Py_TYPE(iterator)); - PyErr_Format(PyExc_TypeError, - __Pyx_FMT_TYPENAME " object is not an iterator", iterator_type_name); - __Pyx_DECREF_TypeName(iterator_type_name); -} -static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject* defval) { - PyObject* next; - iternextfunc iternext = Py_TYPE(iterator)->tp_iternext; - if (likely(iternext)) { -#if 
CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY - next = iternext(iterator); - if (likely(next)) - return next; -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(iternext == &_PyObject_NextNotImplemented)) - return NULL; -#endif -#else - next = PyIter_Next(iterator); - if (likely(next)) - return next; -#endif - } else if (CYTHON_USE_TYPE_SLOTS || unlikely(!PyIter_Check(iterator))) { - __Pyx_PyIter_Next_ErrorNoIterator(iterator); - return NULL; - } -#if !CYTHON_USE_TYPE_SLOTS - else { - next = PyIter_Next(iterator); - if (likely(next)) - return next; - } -#endif - return __Pyx_PyIter_Next2Default(defval); -} - -/* GetTopmostException */ -#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE -static _PyErr_StackItem * -__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_value == NULL || exc_info->exc_value == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - -/* SaveResetException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - PyObject *exc_value = exc_info->exc_value; - if (exc_value == NULL || exc_value == Py_None) { - *value = NULL; - *type = NULL; - *tb = NULL; - } else { - *value = exc_value; - Py_INCREF(*value); - *type = (PyObject*) Py_TYPE(exc_value); - Py_INCREF(*type); - *tb = PyException_GetTraceback(exc_value); - } - #elif CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = exc_info->exc_traceback; - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); - #else - *type = tstate->exc_type; - *value = tstate->exc_value; - *tb = tstate->exc_traceback; - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); - #endif -} -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 - _PyErr_StackItem *exc_info = tstate->exc_info; - PyObject *tmp_value = exc_info->exc_value; - exc_info->exc_value = value; - Py_XDECREF(tmp_value); - Py_XDECREF(type); - Py_XDECREF(tb); - #else - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = type; - tstate->exc_value = value; - tstate->exc_traceback = tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); - #endif -} -#endif - -/* GetException */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ - PyObject *local_type = NULL, *local_value, *local_tb = NULL; -#if CYTHON_FAST_THREAD_STATE - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if PY_VERSION_HEX >= 0x030C00A6 - local_value = tstate->current_exception; - 
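/* CPython 3.12+ stores only the exception *value* in the thread state (tstate->current_exception); its type and traceback are recovered below via Py_TYPE() and PyException_GetTraceback() rather than being fetched from the older curexc_type/curexc_value/curexc_traceback triple. */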
tstate->current_exception = 0; - if (likely(local_value)) { - local_type = (PyObject*) Py_TYPE(local_value); - Py_INCREF(local_type); - local_tb = PyException_GetTraceback(local_value); - } - #else - local_type = tstate->curexc_type; - local_value = tstate->curexc_value; - local_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; - #endif -#else - PyErr_Fetch(&local_type, &local_value, &local_tb); -#endif - PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE && PY_VERSION_HEX >= 0x030C00A6 - if (unlikely(tstate->current_exception)) -#elif CYTHON_FAST_THREAD_STATE - if (unlikely(tstate->curexc_type)) -#else - if (unlikely(PyErr_Occurred())) -#endif - goto bad; - #if PY_MAJOR_VERSION >= 3 - if (local_tb) { - if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) - goto bad; - } - #endif - Py_XINCREF(local_tb); - Py_XINCREF(local_type); - Py_XINCREF(local_value); - *type = local_type; - *value = local_value; - *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - _PyErr_StackItem *exc_info = tstate->exc_info; - #if PY_VERSION_HEX >= 0x030B00a4 - tmp_value = exc_info->exc_value; - exc_info->exc_value = local_value; - tmp_type = NULL; - tmp_tb = NULL; - Py_XDECREF(local_type); - Py_XDECREF(local_tb); - #else - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - #endif - } - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = local_type; - tstate->exc_value = local_value; - tstate->exc_traceback = local_tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#else - PyErr_SetExcInfo(local_type, local_value, local_tb); -#endif - return 0; -bad: - *type = 0; - *value = 0; - *tb = 0; - Py_XDECREF(local_type); - Py_XDECREF(local_value); - Py_XDECREF(local_tb); - return -1; -} - -/* PyObjectGetMethod */ -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { - PyObject *attr; -#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP - __Pyx_TypeName type_name; - PyTypeObject *tp = Py_TYPE(obj); - PyObject *descr; - descrgetfunc f = NULL; - PyObject **dictptr, *dict; - int meth_found = 0; - assert (*method == NULL); - if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; - } - if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { - return 0; - } - descr = _PyType_Lookup(tp, name); - if (likely(descr != NULL)) { - Py_INCREF(descr); -#if defined(Py_TPFLAGS_METHOD_DESCRIPTOR) && Py_TPFLAGS_METHOD_DESCRIPTOR - if (__Pyx_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)) -#elif PY_MAJOR_VERSION >= 3 - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type))) - #endif -#else - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr))) - #endif -#endif - { - meth_found = 1; - } else { - f = Py_TYPE(descr)->tp_descr_get; - if (f != NULL && PyDescr_IsData(descr)) { - attr = 
f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - } - } - dictptr = _PyObject_GetDictPtr(obj); - if (dictptr != NULL && (dict = *dictptr) != NULL) { - Py_INCREF(dict); - attr = __Pyx_PyDict_GetItemStr(dict, name); - if (attr != NULL) { - Py_INCREF(attr); - Py_DECREF(dict); - Py_XDECREF(descr); - goto try_unpack; - } - Py_DECREF(dict); - } - if (meth_found) { - *method = descr; - return 1; - } - if (f != NULL) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - if (likely(descr != NULL)) { - *method = descr; - return 0; - } - type_name = __Pyx_PyType_GetName(tp); - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", - type_name, name); -#else - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", - type_name, PyString_AS_STRING(name)); -#endif - __Pyx_DECREF_TypeName(type_name); - return 0; -#else - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; -#endif -try_unpack: -#if CYTHON_UNPACK_METHODS - if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { - PyObject *function = PyMethod_GET_FUNCTION(attr); - Py_INCREF(function); - Py_DECREF(attr); - *method = function; - return 1; - } -#endif - *method = attr; - return 0; -} - -/* PyObjectCallMethod0 */ -static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { - PyObject *method = NULL, *result = NULL; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_CallOneArg(method, obj); - Py_DECREF(method); - return result; - } - if (unlikely(!method)) goto bad; - result = __Pyx_PyObject_CallNoArg(method); - Py_DECREF(method); -bad: - return result; -} - -/* UnpackUnboundCMethod */ -static PyObject *__Pyx_SelflessCall(PyObject *method, PyObject *args, PyObject *kwargs) { - PyObject *selfless_args = PyTuple_GetSlice(args, 1, PyTuple_Size(args)); - if (unlikely(!selfless_args)) return NULL; - PyObject *result = PyObject_Call(method, selfless_args, kwargs); - Py_DECREF(selfless_args); - return result; -} -static PyMethodDef __Pyx_UnboundCMethod_Def = { - "CythonUnboundCMethod", - __PYX_REINTERPRET_FUNCION(PyCFunction, __Pyx_SelflessCall), - METH_VARARGS | METH_KEYWORDS, - NULL -}; -static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) { - PyObject *method; - method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name); - if (unlikely(!method)) - return -1; - target->method = method; -#if CYTHON_COMPILING_IN_CPYTHON - #if PY_MAJOR_VERSION >= 3 - if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type))) - #else - if (likely(!PyCFunction_Check(method))) - #endif - { - PyMethodDescrObject *descr = (PyMethodDescrObject*) method; - target->func = descr->d_method->ml_meth; - target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS); - } else -#endif -#if defined(CYTHON_COMPILING_IN_PYPY) -#elif PY_VERSION_HEX >= 0x03090000 - if (PyCFunction_CheckExact(method)) -#else - if (PyCFunction_Check(method)) -#endif - { - PyObject *self; - int self_found; -#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY - self = PyObject_GetAttrString(method, "__self__"); - if (!self) { - PyErr_Clear(); - } -#else - self = PyCFunction_GET_SELF(method); -#endif - self_found = (self && self != Py_None); -#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY - Py_XDECREF(self); -#endif - if 
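/* Note: when the looked-up method is a PyCFunction that is already bound (its __self__ is set), it cannot be called in unbound style. The branch below wraps it in __Pyx_UnboundCMethod_Def, whose __Pyx_SelflessCall trampoline drops the explicit self argument (args[0]) before delegating to the bound function. */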
(self_found) { - PyObject *unbound_method = PyCFunction_New(&__Pyx_UnboundCMethod_Def, method); - if (unlikely(!unbound_method)) return -1; - Py_DECREF(method); - target->method = unbound_method; - } - } - return 0; -} - -/* CallUnboundCMethod0 */ -static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { - PyObject *args, *result = NULL; - if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; -#if CYTHON_ASSUME_SAFE_MACROS - args = PyTuple_New(1); - if (unlikely(!args)) goto bad; - Py_INCREF(self); - PyTuple_SET_ITEM(args, 0, self); -#else - args = PyTuple_Pack(1, self); - if (unlikely(!args)) goto bad; -#endif - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); - Py_DECREF(args); -bad: - return result; -} - -/* pop */ -static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L) { - if (__Pyx_IS_TYPE(L, &PySet_Type)) { - return PySet_Pop(L); - } - return __Pyx_PyObject_CallMethod0(L, __pyx_n_s_pop); -} -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L) { - if (likely(PyList_GET_SIZE(L) > (((PyListObject*)L)->allocated >> 1))) { - __Pyx_SET_SIZE(L, Py_SIZE(L) - 1); - return PyList_GET_ITEM(L, PyList_GET_SIZE(L)); - } - return __Pyx_CallUnboundCMethod0(&__pyx_umethod_PyList_Type_pop, L); -} -#endif - -/* PyObjectCall2Args */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { - PyObject *args[3] = {NULL, arg1, arg2}; - return __Pyx_PyObject_FastCall(function, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); -} - -/* PyObjectCallMethod1 */ -static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) { - PyObject *result = __Pyx_PyObject_CallOneArg(method, arg); - Py_DECREF(method); - return result; -} -static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) { - PyObject *method = NULL, *result; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_Call2Args(method, obj, arg); - Py_DECREF(method); - return result; - } - if (unlikely(!method)) return NULL; - return __Pyx__PyObject_CallMethod1(method, arg); -} - -/* append */ -static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x) { - if (likely(PyList_CheckExact(L))) { - if (unlikely(__Pyx_PyList_Append(L, x) < 0)) return -1; - } else { - PyObject* retval = __Pyx_PyObject_CallMethod1(L, __pyx_n_s_append, x); - if (unlikely(!retval)) - return -1; - Py_DECREF(retval); - } - return 0; -} - -/* FastTypeChecks */ -#if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*); - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (cls == a || cls == b) return 1; - mro = cls->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - PyObject *base = 
PyTuple_GET_ITEM(mro, i); - if (base == (PyObject *)a || base == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - if (exc_type1) { - return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2); - } else { - return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - PyObject *t = PyTuple_GET_ITEM(tuple, i); - #if PY_MAJOR_VERSION < 3 - if (likely(exc_type == t)) return 1; - #endif - if (likely(PyExceptionClass_Check(t))) { - if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; - } - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { - if (likely(err == exc_type)) return 1; - if (likely(PyExceptionClass_Check(err))) { - if (likely(PyExceptionClass_Check(exc_type))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); - } else if (likely(PyTuple_Check(exc_type))) { - return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); - } - } - return PyErr_GivenExceptionMatches(err, exc_type); -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { - if (likely(err == exc_type1 || err == exc_type2)) return 1; - if (likely(PyExceptionClass_Check(err))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); - } - return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); -} -#endif - -/* SwapException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_value = exc_info->exc_value; - exc_info->exc_value = *value; - if (tmp_value == NULL || tmp_value == Py_None) { - Py_XDECREF(tmp_value); - tmp_value = NULL; - tmp_type = NULL; - tmp_tb = NULL; - } else { - tmp_type = (PyObject*) Py_TYPE(tmp_value); - Py_INCREF(tmp_type); - #if CYTHON_COMPILING_IN_CPYTHON - tmp_tb = ((PyBaseExceptionObject*) tmp_value)->traceback; - Py_XINCREF(tmp_tb); - #else - tmp_tb = PyException_GetTraceback(tmp_value); - #endif - } - #elif CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = *type; - exc_info->exc_value = *value; - exc_info->exc_traceback = *tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = *type; - tstate->exc_value = *value; - tstate->exc_traceback = *tb; - #endif - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); - PyErr_SetExcInfo(*type, *value, *tb); - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#endif - -/* GetAttr */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { -#if CYTHON_USE_TYPE_SLOTS -#if PY_MAJOR_VERSION >= 3 - if (likely(PyUnicode_Check(n))) -#else - if (likely(PyString_Check(n))) -#endif - return __Pyx_PyObject_GetAttrStr(o, n); -#endif - return PyObject_GetAttr(o, n); -} - -/* HasAttr */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { - PyObject *r; - if (unlikely(!__Pyx_PyBaseString_Check(n))) { - PyErr_SetString(PyExc_TypeError, - "hasattr(): attribute name must be string"); - return -1; - } - r = __Pyx_GetAttr(o, n); - if (!r) { - PyErr_Clear(); - return 0; - } else { - Py_DECREF(r); - return 1; - } -} - -/* GetAttr3 */ -static PyObject *__Pyx_GetAttr3Default(PyObject *d) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign
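/* __Pyx_GetAttr3() implements three-argument getattr(o, n, d): the default below is returned only for AttributeError; every other exception keeps propagating. */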
- if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - return NULL; - __Pyx_PyErr_Clear(); - Py_INCREF(d); - return d; -} -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { - PyObject *r; -#if CYTHON_USE_TYPE_SLOTS - if (likely(PyString_Check(n))) { - r = __Pyx_PyObject_GetAttrStrNoError(o, n); - if (unlikely(!r) && likely(!PyErr_Occurred())) { - r = __Pyx_NewRef(d); - } - return r; - } -#endif - r = PyObject_GetAttr(o, n); - return (likely(r)) ? r : __Pyx_GetAttr3Default(d); -} - -/* Import */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *module = 0; - PyObject *empty_dict = 0; - PyObject *empty_list = 0; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (unlikely(!py_import)) - goto bad; - if (!from_list) { - empty_list = PyList_New(0); - if (unlikely(!empty_list)) - goto bad; - from_list = empty_list; - } - #endif - empty_dict = PyDict_New(); - if (unlikely(!empty_dict)) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - #if CYTHON_COMPILING_IN_LIMITED_API - module = PyImport_ImportModuleLevelObject( - name, empty_dict, empty_dict, from_list, 1); - #else - module = PyImport_ImportModuleLevelObject( - name, __pyx_d, empty_dict, from_list, 1); - #endif - if (unlikely(!module)) { - if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (unlikely(!py_level)) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, __pyx_d, empty_dict, from_list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - #if CYTHON_COMPILING_IN_LIMITED_API - module = PyImport_ImportModuleLevelObject( - name, empty_dict, empty_dict, from_list, level); - #else - module = PyImport_ImportModuleLevelObject( - name, __pyx_d, empty_dict, from_list, level); - #endif - #endif - } - } -bad: - Py_XDECREF(empty_dict); - Py_XDECREF(empty_list); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - return module; -} - -/* ImportFrom */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { - PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); - if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { - const char* module_name_str = 0; - PyObject* module_name = 0; - PyObject* module_dot = 0; - PyObject* full_name = 0; - PyErr_Clear(); - module_name_str = PyModule_GetName(module); - if (unlikely(!module_name_str)) { goto modbad; } - module_name = PyUnicode_FromString(module_name_str); - if (unlikely(!module_name)) { goto modbad; } - module_dot = PyUnicode_Concat(module_name, __pyx_kp_u__8); - if (unlikely(!module_dot)) { goto modbad; } - full_name = PyUnicode_Concat(module_dot, name); - if (unlikely(!full_name)) { goto modbad; } - #if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) - { - PyObject *modules = PyImport_GetModuleDict(); - if (unlikely(!modules)) - goto modbad; - value = PyObject_GetItem(modules, full_name); - } - #else - value = PyImport_GetModule(full_name); - #endif - modbad: - Py_XDECREF(full_name); - Py_XDECREF(module_dot); - Py_XDECREF(module_name); - } - if (unlikely(!value)) { - PyErr_Format(PyExc_ImportError, - #if PY_MAJOR_VERSION < 3 - "cannot import name %.230s", PyString_AS_STRING(name)); - #else - 
"cannot import name %S", name); - #endif - } - return value; -} - -/* ImportDottedModule */ -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx__ImportDottedModule_Error(PyObject *name, PyObject *parts_tuple, Py_ssize_t count) { - PyObject *partial_name = NULL, *slice = NULL, *sep = NULL; - if (unlikely(PyErr_Occurred())) { - PyErr_Clear(); - } - if (likely(PyTuple_GET_SIZE(parts_tuple) == count)) { - partial_name = name; - } else { - slice = PySequence_GetSlice(parts_tuple, 0, count); - if (unlikely(!slice)) - goto bad; - sep = PyUnicode_FromStringAndSize(".", 1); - if (unlikely(!sep)) - goto bad; - partial_name = PyUnicode_Join(sep, slice); - } - PyErr_Format( -#if PY_MAJOR_VERSION < 3 - PyExc_ImportError, - "No module named '%s'", PyString_AS_STRING(partial_name)); -#else -#if PY_VERSION_HEX >= 0x030600B1 - PyExc_ModuleNotFoundError, -#else - PyExc_ImportError, -#endif - "No module named '%U'", partial_name); -#endif -bad: - Py_XDECREF(sep); - Py_XDECREF(slice); - Py_XDECREF(partial_name); - return NULL; -} -#endif -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx__ImportDottedModule_Lookup(PyObject *name) { - PyObject *imported_module; -#if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) - PyObject *modules = PyImport_GetModuleDict(); - if (unlikely(!modules)) - return NULL; - imported_module = __Pyx_PyDict_GetItemStr(modules, name); - Py_XINCREF(imported_module); -#else - imported_module = PyImport_GetModule(name); -#endif - return imported_module; -} -#endif -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple) { - Py_ssize_t i, nparts; - nparts = PyTuple_GET_SIZE(parts_tuple); - for (i=1; i < nparts && module; i++) { - PyObject *part, *submodule; -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - part = PyTuple_GET_ITEM(parts_tuple, i); -#else - part = PySequence_ITEM(parts_tuple, i); -#endif - submodule = __Pyx_PyObject_GetAttrStrNoError(module, part); -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(part); -#endif - Py_DECREF(module); - module = submodule; - } - if (unlikely(!module)) { - return __Pyx__ImportDottedModule_Error(name, parts_tuple, i); - } - return module; -} -#endif -static PyObject *__Pyx__ImportDottedModule(PyObject *name, PyObject *parts_tuple) { -#if PY_MAJOR_VERSION < 3 - PyObject *module, *from_list, *star = __pyx_n_s__13; - CYTHON_UNUSED_VAR(parts_tuple); - from_list = PyList_New(1); - if (unlikely(!from_list)) - return NULL; - Py_INCREF(star); - PyList_SET_ITEM(from_list, 0, star); - module = __Pyx_Import(name, from_list, 0); - Py_DECREF(from_list); - return module; -#else - PyObject *imported_module; - PyObject *module = __Pyx_Import(name, NULL, 0); - if (!parts_tuple || unlikely(!module)) - return module; - imported_module = __Pyx__ImportDottedModule_Lookup(name); - if (likely(imported_module)) { - Py_DECREF(module); - return imported_module; - } - PyErr_Clear(); - return __Pyx_ImportDottedModule_WalkParts(module, name, parts_tuple); -#endif -} -static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple) { -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030400B1 - PyObject *module = __Pyx__ImportDottedModule_Lookup(name); - if (likely(module)) { - PyObject *spec = __Pyx_PyObject_GetAttrStrNoError(module, __pyx_n_s_spec); - if (likely(spec)) { - PyObject *unsafe = __Pyx_PyObject_GetAttrStrNoError(spec, __pyx_n_s_initializing); - if (likely(!unsafe || 
!__Pyx_PyObject_IsTrue(unsafe))) { - Py_DECREF(spec); - spec = NULL; - } - Py_XDECREF(unsafe); - } - if (likely(!spec)) { - PyErr_Clear(); - return module; - } - Py_DECREF(spec); - Py_DECREF(module); - } else if (PyErr_Occurred()) { - PyErr_Clear(); - } -#endif - return __Pyx__ImportDottedModule(name, parts_tuple); -} - -/* Py3UpdateBases */ -static PyObject* -__Pyx_PEP560_update_bases(PyObject *bases) -{ - Py_ssize_t i, j, size_bases; - PyObject *base, *meth, *new_base, *result, *new_bases = NULL; - size_bases = PyTuple_GET_SIZE(bases); - for (i = 0; i < size_bases; i++) { - base = PyTuple_GET_ITEM(bases, i); - if (PyType_Check(base)) { - if (new_bases) { - if (PyList_Append(new_bases, base) < 0) { - goto error; - } - } - continue; - } - meth = __Pyx_PyObject_GetAttrStrNoError(base, __pyx_n_s_mro_entries); - if (!meth && PyErr_Occurred()) { - goto error; - } - if (!meth) { - if (new_bases) { - if (PyList_Append(new_bases, base) < 0) { - goto error; - } - } - continue; - } - new_base = __Pyx_PyObject_CallOneArg(meth, bases); - Py_DECREF(meth); - if (!new_base) { - goto error; - } - if (!PyTuple_Check(new_base)) { - PyErr_SetString(PyExc_TypeError, - "__mro_entries__ must return a tuple"); - Py_DECREF(new_base); - goto error; - } - if (!new_bases) { - if (!(new_bases = PyList_New(i))) { - goto error; - } - for (j = 0; j < i; j++) { - base = PyTuple_GET_ITEM(bases, j); - PyList_SET_ITEM(new_bases, j, base); - Py_INCREF(base); - } - } - j = PyList_GET_SIZE(new_bases); - if (PyList_SetSlice(new_bases, j, j, new_base) < 0) { - goto error; - } - Py_DECREF(new_base); - } - if (!new_bases) { - Py_INCREF(bases); - return bases; - } - result = PyList_AsTuple(new_bases); - Py_DECREF(new_bases); - return result; -error: - Py_XDECREF(new_bases); - return NULL; -} - -/* CalculateMetaclass */ -static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { - Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases); - for (i=0; i < nbases; i++) { - PyTypeObject *tmptype; - PyObject *tmp = PyTuple_GET_ITEM(bases, i); - tmptype = Py_TYPE(tmp); -#if PY_MAJOR_VERSION < 3 - if (tmptype == &PyClass_Type) - continue; -#endif - if (!metaclass) { - metaclass = tmptype; - continue; - } - if (PyType_IsSubtype(metaclass, tmptype)) - continue; - if (PyType_IsSubtype(tmptype, metaclass)) { - metaclass = tmptype; - continue; - } - PyErr_SetString(PyExc_TypeError, - "metaclass conflict: " - "the metaclass of a derived class " - "must be a (non-strict) subclass " - "of the metaclasses of all its bases"); - return NULL; - } - if (!metaclass) { -#if PY_MAJOR_VERSION < 3 - metaclass = &PyClass_Type; -#else - metaclass = &PyType_Type; -#endif - } - Py_INCREF((PyObject*) metaclass); - return (PyObject*) metaclass; -} - -/* FixUpExtensionType */ -#if CYTHON_USE_TYPE_SPECS -static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) { -#if PY_VERSION_HEX > 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API - CYTHON_UNUSED_VAR(spec); - CYTHON_UNUSED_VAR(type); -#else - const PyType_Slot *slot = spec->slots; - while (slot && slot->slot && slot->slot != Py_tp_members) - slot++; - if (slot && slot->slot == Py_tp_members) { - int changed = 0; -#if !(PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON) - const -#endif - PyMemberDef *memb = (PyMemberDef*) slot->pfunc; - while (memb && memb->name) { - if (memb->name[0] == '_' && memb->name[1] == '_') { -#if PY_VERSION_HEX < 0x030900b1 - if (strcmp(memb->name, "__weaklistoffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags 
== READONLY); - type->tp_weaklistoffset = memb->offset; - changed = 1; - } - else if (strcmp(memb->name, "__dictoffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); - type->tp_dictoffset = memb->offset; - changed = 1; - } -#if CYTHON_METH_FASTCALL - else if (strcmp(memb->name, "__vectorcalloffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); -#if PY_VERSION_HEX >= 0x030800b4 - type->tp_vectorcall_offset = memb->offset; -#else - type->tp_print = (printfunc) memb->offset; -#endif - changed = 1; - } -#endif -#else - if ((0)); -#endif -#if PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON - else if (strcmp(memb->name, "__module__") == 0) { - PyObject *descr; - assert(memb->type == T_OBJECT); - assert(memb->flags == 0 || memb->flags == READONLY); - descr = PyDescr_NewMember(type, memb); - if (unlikely(!descr)) - return -1; - if (unlikely(PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr) < 0)) { - Py_DECREF(descr); - return -1; - } - Py_DECREF(descr); - changed = 1; - } -#endif - } - memb++; - } - if (changed) - PyType_Modified(type); - } -#endif - return 0; -} -#endif - -/* FetchSharedCythonModule */ -static PyObject *__Pyx_FetchSharedCythonABIModule(void) { - PyObject *abi_module = PyImport_AddModule((char*) __PYX_ABI_MODULE_NAME); - if (unlikely(!abi_module)) return NULL; - Py_INCREF(abi_module); - return abi_module; -} - -/* FetchCommonType */ -static int __Pyx_VerifyCachedType(PyObject *cached_type, - const char *name, - Py_ssize_t basicsize, - Py_ssize_t expected_basicsize) { - if (!PyType_Check(cached_type)) { - PyErr_Format(PyExc_TypeError, - "Shared Cython type %.200s is not a type object", name); - return -1; - } - if (basicsize != expected_basicsize) { - PyErr_Format(PyExc_TypeError, - "Shared Cython type %.200s has the wrong size, try recompiling", - name); - return -1; - } - return 0; -} -#if !CYTHON_USE_TYPE_SPECS -static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { - PyObject* abi_module; - const char* object_name; - PyTypeObject *cached_type = NULL; - abi_module = __Pyx_FetchSharedCythonABIModule(); - if (!abi_module) return NULL; - object_name = strrchr(type->tp_name, '.'); - object_name = object_name ? object_name+1 : type->tp_name; - cached_type = (PyTypeObject*) PyObject_GetAttrString(abi_module, object_name); - if (cached_type) { - if (__Pyx_VerifyCachedType( - (PyObject *)cached_type, - object_name, - cached_type->tp_basicsize, - type->tp_basicsize) < 0) { - goto bad; - } - goto done; - } - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - if (PyType_Ready(type) < 0) goto bad; - if (PyObject_SetAttrString(abi_module, object_name, (PyObject *)type) < 0) - goto bad; - Py_INCREF(type); - cached_type = type; -done: - Py_DECREF(abi_module); - return cached_type; -bad: - Py_XDECREF(cached_type); - cached_type = NULL; - goto done; -} -#else -static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases) { - PyObject *abi_module, *cached_type = NULL; - const char* object_name = strrchr(spec->name, '.'); - object_name = object_name ? 
object_name+1 : spec->name; - abi_module = __Pyx_FetchSharedCythonABIModule(); - if (!abi_module) return NULL; - cached_type = PyObject_GetAttrString(abi_module, object_name); - if (cached_type) { - Py_ssize_t basicsize; -#if CYTHON_COMPILING_IN_LIMITED_API - PyObject *py_basicsize; - py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__"); - if (unlikely(!py_basicsize)) goto bad; - basicsize = PyLong_AsSsize_t(py_basicsize); - Py_DECREF(py_basicsize); - py_basicsize = 0; - if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; -#else - basicsize = likely(PyType_Check(cached_type)) ? ((PyTypeObject*) cached_type)->tp_basicsize : -1; -#endif - if (__Pyx_VerifyCachedType( - cached_type, - object_name, - basicsize, - spec->basicsize) < 0) { - goto bad; - } - goto done; - } - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - CYTHON_UNUSED_VAR(module); - cached_type = __Pyx_PyType_FromModuleAndSpec(abi_module, spec, bases); - if (unlikely(!cached_type)) goto bad; - if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad; - if (PyObject_SetAttrString(abi_module, object_name, cached_type) < 0) goto bad; -done: - Py_DECREF(abi_module); - assert(cached_type == NULL || PyType_Check(cached_type)); - return (PyTypeObject *) cached_type; -bad: - Py_XDECREF(cached_type); - cached_type = NULL; - goto done; -} -#endif - -/* PyVectorcallFastCallDict */ -#if CYTHON_METH_FASTCALL -static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) -{ - PyObject *res = NULL; - PyObject *kwnames; - PyObject **newargs; - PyObject **kwvalues; - Py_ssize_t i, pos; - size_t j; - PyObject *key, *value; - unsigned long keys_are_strings; - Py_ssize_t nkw = PyDict_GET_SIZE(kw); - newargs = (PyObject **)PyMem_Malloc((nargs + (size_t)nkw) * sizeof(args[0])); - if (unlikely(newargs == NULL)) { - PyErr_NoMemory(); - return NULL; - } - for (j = 0; j < nargs; j++) newargs[j] = args[j]; - kwnames = PyTuple_New(nkw); - if (unlikely(kwnames == NULL)) { - PyMem_Free(newargs); - return NULL; - } - kwvalues = newargs + nargs; - pos = i = 0; - keys_are_strings = Py_TPFLAGS_UNICODE_SUBCLASS; - while (PyDict_Next(kw, &pos, &key, &value)) { - keys_are_strings &= Py_TYPE(key)->tp_flags; - Py_INCREF(key); - Py_INCREF(value); - PyTuple_SET_ITEM(kwnames, i, key); - kwvalues[i] = value; - i++; - } - if (unlikely(!keys_are_strings)) { - PyErr_SetString(PyExc_TypeError, "keywords must be strings"); - goto cleanup; - } - res = vc(func, newargs, nargs, kwnames); -cleanup: - Py_DECREF(kwnames); - for (i = 0; i < nkw; i++) - Py_DECREF(kwvalues[i]); - PyMem_Free(newargs); - return res; -} -static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) -{ - if (likely(kw == NULL) || PyDict_GET_SIZE(kw) == 0) { - return vc(func, args, nargs, NULL); - } - return __Pyx_PyVectorcall_FastCallDict_kw(func, vc, args, nargs, kw); -} -#endif - -/* CythonFunctionShared */ -static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj) { -#if PY_VERSION_HEX < 0x030900B1 - __Pyx_Py_XDECREF_SET( - __Pyx_CyFunction_GetClassObj(f), - ((classobj) ? __Pyx_NewRef(classobj) : NULL)); -#else - __Pyx_Py_XDECREF_SET( - ((PyCMethodObject *) (f))->mm_class, - (PyTypeObject*)((classobj) ? 
__Pyx_NewRef(classobj) : NULL)); -#endif -} -static PyObject * -__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, void *closure) -{ - CYTHON_UNUSED_VAR(closure); - if (unlikely(op->func_doc == NULL)) { - if (((PyCFunctionObject*)op)->m_ml->ml_doc) { -#if PY_MAJOR_VERSION >= 3 - op->func_doc = PyUnicode_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); -#else - op->func_doc = PyString_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); -#endif - if (unlikely(op->func_doc == NULL)) - return NULL; - } else { - Py_INCREF(Py_None); - return Py_None; - } - } - Py_INCREF(op->func_doc); - return op->func_doc; -} -static int -__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (value == NULL) { - value = Py_None; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_doc, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(op->func_name == NULL)) { -#if PY_MAJOR_VERSION >= 3 - op->func_name = PyUnicode_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); -#else - op->func_name = PyString_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); -#endif - if (unlikely(op->func_name == NULL)) - return NULL; - } - Py_INCREF(op->func_name); - return op->func_name; -} -static int -__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__name__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_name, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - Py_INCREF(op->func_qualname); - return op->func_qualname; -} -static int -__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__qualname__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_qualname, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(op->func_dict == NULL)) { - op->func_dict = PyDict_New(); - if (unlikely(op->func_dict == NULL)) - return NULL; - } - Py_INCREF(op->func_dict); - return op->func_dict; -} -static int -__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(value == NULL)) { - PyErr_SetString(PyExc_TypeError, - "function's dictionary may not be deleted"); - return -1; - } - if (unlikely(!PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "setting function's dictionary to a non-dict"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_dict, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - Py_INCREF(op->func_globals); - return op->func_globals; -} -static PyObject * 
-__Pyx_CyFunction_get_closure(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(op); - CYTHON_UNUSED_VAR(context); - Py_INCREF(Py_None); - return Py_None; -} -static PyObject * -__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, void *context) -{ - PyObject* result = (op->func_code) ? op->func_code : Py_None; - CYTHON_UNUSED_VAR(context); - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { - int result = 0; - PyObject *res = op->defaults_getter((PyObject *) op); - if (unlikely(!res)) - return -1; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - op->defaults_tuple = PyTuple_GET_ITEM(res, 0); - Py_INCREF(op->defaults_tuple); - op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); - Py_INCREF(op->defaults_kwdict); - #else - op->defaults_tuple = PySequence_ITEM(res, 0); - if (unlikely(!op->defaults_tuple)) result = -1; - else { - op->defaults_kwdict = PySequence_ITEM(res, 1); - if (unlikely(!op->defaults_kwdict)) result = -1; - } - #endif - Py_DECREF(res); - return result; -} -static int -__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value) { - value = Py_None; - } else if (unlikely(value != Py_None && !PyTuple_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__defaults__ must be set to a tuple object"); - return -1; - } - PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__defaults__ will not " - "currently affect the values used in function calls", 1); - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->defaults_tuple, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, void *context) { - PyObject* result = op->defaults_tuple; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - if (op->defaults_getter) { - if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; - result = op->defaults_tuple; - } else { - result = Py_None; - } - } - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value) { - value = Py_None; - } else if (unlikely(value != Py_None && !PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__kwdefaults__ must be set to a dict object"); - return -1; - } - PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__kwdefaults__ will not " - "currently affect the values used in function calls", 1); - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->defaults_kwdict, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, void *context) { - PyObject* result = op->defaults_kwdict; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - if (op->defaults_getter) { - if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; - result = op->defaults_kwdict; - } else { - result = Py_None; - } - } - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value || value == Py_None) { - value = NULL; - } else if (unlikely(!PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__annotations__ must be set to a dict object"); - return -1; - } - Py_XINCREF(value); - __Pyx_Py_XDECREF_SET(op->func_annotations, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, void *context) 
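/* __annotations__ is materialised lazily: as with regular Python functions, an empty dict is created on first access and cached on the function object. */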
{ - PyObject* result = op->func_annotations; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - result = PyDict_New(); - if (unlikely(!result)) return NULL; - op->func_annotations = result; - } - Py_INCREF(result); - return result; -} -static PyObject * -__Pyx_CyFunction_get_is_coroutine(__pyx_CyFunctionObject *op, void *context) { - int is_coroutine; - CYTHON_UNUSED_VAR(context); - if (op->func_is_coroutine) { - return __Pyx_NewRef(op->func_is_coroutine); - } - is_coroutine = op->flags & __Pyx_CYFUNCTION_COROUTINE; -#if PY_VERSION_HEX >= 0x03050000 - if (is_coroutine) { - PyObject *module, *fromlist, *marker = __pyx_n_s_is_coroutine; - fromlist = PyList_New(1); - if (unlikely(!fromlist)) return NULL; - Py_INCREF(marker); - PyList_SET_ITEM(fromlist, 0, marker); - module = PyImport_ImportModuleLevelObject(__pyx_n_s_asyncio_coroutines, NULL, NULL, fromlist, 0); - Py_DECREF(fromlist); - if (unlikely(!module)) goto ignore; - op->func_is_coroutine = __Pyx_PyObject_GetAttrStr(module, marker); - Py_DECREF(module); - if (likely(op->func_is_coroutine)) { - return __Pyx_NewRef(op->func_is_coroutine); - } -ignore: - PyErr_Clear(); - } -#endif - op->func_is_coroutine = __Pyx_PyBool_FromLong(is_coroutine); - return __Pyx_NewRef(op->func_is_coroutine); -} -static PyGetSetDef __pyx_CyFunction_getsets[] = { - {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, - {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, - {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, - {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, - {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, - {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, - {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, - {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, - {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, - {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, - {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, - {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, - {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, - {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, - {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, - {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, - {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, - {(char *) "_is_coroutine", (getter)__Pyx_CyFunction_get_is_coroutine, 0, 0, 0}, - {0, 0, 0, 0, 0} -}; -static PyMemberDef __pyx_CyFunction_members[] = { - {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), 0, 0}, -#if CYTHON_USE_TYPE_SPECS - {(char *) "__dictoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_dict), READONLY, 0}, -#if CYTHON_METH_FASTCALL -#if CYTHON_BACKPORT_VECTORCALL - {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_vectorcall), READONLY, 0}, -#else - {(char *) "__vectorcalloffset__", T_PYSSIZET, 
offsetof(PyCFunctionObject, vectorcall), READONLY, 0}, -#endif -#endif -#if PY_VERSION_HEX < 0x030500A0 - {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_weakreflist), READONLY, 0}, -#else - {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(PyCFunctionObject, m_weakreflist), READONLY, 0}, -#endif -#endif - {0, 0, 0, 0, 0} -}; -static PyObject * -__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, PyObject *args) -{ - CYTHON_UNUSED_VAR(args); -#if PY_MAJOR_VERSION >= 3 - Py_INCREF(m->func_qualname); - return m->func_qualname; -#else - return PyString_FromString(((PyCFunctionObject*)m)->m_ml->ml_name); -#endif -} -static PyMethodDef __pyx_CyFunction_methods[] = { - {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, - {0, 0, 0, 0} -}; -#if PY_VERSION_HEX < 0x030500A0 -#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) -#else -#define __Pyx_CyFunction_weakreflist(cyfunc) (((PyCFunctionObject*)cyfunc)->m_weakreflist) -#endif -static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, - PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { - PyCFunctionObject *cf = (PyCFunctionObject*) op; - if (unlikely(op == NULL)) - return NULL; - op->flags = flags; - __Pyx_CyFunction_weakreflist(op) = NULL; - cf->m_ml = ml; - cf->m_self = (PyObject *) op; - Py_XINCREF(closure); - op->func_closure = closure; - Py_XINCREF(module); - cf->m_module = module; - op->func_dict = NULL; - op->func_name = NULL; - Py_INCREF(qualname); - op->func_qualname = qualname; - op->func_doc = NULL; -#if PY_VERSION_HEX < 0x030900B1 - op->func_classobj = NULL; -#else - ((PyCMethodObject*)op)->mm_class = NULL; -#endif - op->func_globals = globals; - Py_INCREF(op->func_globals); - Py_XINCREF(code); - op->func_code = code; - op->defaults_pyobjects = 0; - op->defaults_size = 0; - op->defaults = NULL; - op->defaults_tuple = NULL; - op->defaults_kwdict = NULL; - op->defaults_getter = NULL; - op->func_annotations = NULL; - op->func_is_coroutine = NULL; -#if CYTHON_METH_FASTCALL - switch (ml->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O | METH_KEYWORDS | METH_METHOD)) { - case METH_NOARGS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_NOARGS; - break; - case METH_O: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_O; - break; - case METH_METHOD | METH_FASTCALL | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD; - break; - case METH_FASTCALL | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS; - break; - case METH_VARARGS | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = NULL; - break; - default: - PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); - Py_DECREF(op); - return NULL; - } -#endif - return (PyObject *) op; -} -static int -__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) -{ - Py_CLEAR(m->func_closure); - Py_CLEAR(((PyCFunctionObject*)m)->m_module); - Py_CLEAR(m->func_dict); - Py_CLEAR(m->func_name); - Py_CLEAR(m->func_qualname); - Py_CLEAR(m->func_doc); - Py_CLEAR(m->func_globals); - Py_CLEAR(m->func_code); -#if PY_VERSION_HEX < 0x030900B1 - Py_CLEAR(__Pyx_CyFunction_GetClassObj(m)); -#else - { - PyObject *cls = (PyObject*) ((PyCMethodObject *) (m))->mm_class; - ((PyCMethodObject *) (m))->mm_class = NULL; - Py_XDECREF(cls); - } -#endif - Py_CLEAR(m->defaults_tuple); - 
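/* tp_clear support: all owned references are dropped so the cyclic GC can break reference loops; the malloc'ed defaults buffer below is freed only after the PyObject* slots embedded in it have been released. */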
Py_CLEAR(m->defaults_kwdict); - Py_CLEAR(m->func_annotations); - Py_CLEAR(m->func_is_coroutine); - if (m->defaults) { - PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); - int i; - for (i = 0; i < m->defaults_pyobjects; i++) - Py_XDECREF(pydefaults[i]); - PyObject_Free(m->defaults); - m->defaults = NULL; - } - return 0; -} -static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) -{ - if (__Pyx_CyFunction_weakreflist(m) != NULL) - PyObject_ClearWeakRefs((PyObject *) m); - __Pyx_CyFunction_clear(m); - __Pyx_PyHeapTypeObject_GC_Del(m); -} -static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) -{ - PyObject_GC_UnTrack(m); - __Pyx__CyFunction_dealloc(m); -} -static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) -{ - Py_VISIT(m->func_closure); - Py_VISIT(((PyCFunctionObject*)m)->m_module); - Py_VISIT(m->func_dict); - Py_VISIT(m->func_name); - Py_VISIT(m->func_qualname); - Py_VISIT(m->func_doc); - Py_VISIT(m->func_globals); - Py_VISIT(m->func_code); - Py_VISIT(__Pyx_CyFunction_GetClassObj(m)); - Py_VISIT(m->defaults_tuple); - Py_VISIT(m->defaults_kwdict); - Py_VISIT(m->func_is_coroutine); - if (m->defaults) { - PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); - int i; - for (i = 0; i < m->defaults_pyobjects; i++) - Py_VISIT(pydefaults[i]); - } - return 0; -} -static PyObject* -__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) -{ -#if PY_MAJOR_VERSION >= 3 - return PyUnicode_FromFormat("<cyfunction %U at %p>", - op->func_qualname, (void *)op); -#else - return PyString_FromFormat("<cyfunction %s at %p>", - PyString_AsString(op->func_qualname), (void *)op); -#endif -} -static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { - PyCFunctionObject* f = (PyCFunctionObject*)func; - PyCFunction meth = f->m_ml->ml_meth; - Py_ssize_t size; - switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { - case METH_VARARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) - return (*meth)(self, arg); - break; - case METH_VARARGS | METH_KEYWORDS: - return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); - case METH_NOARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { - size = PyTuple_GET_SIZE(arg); - if (likely(size == 0)) - return (*meth)(self, NULL); - PyErr_Format(PyExc_TypeError, - "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", - f->m_ml->ml_name, size); - return NULL; - } - break; - case METH_O: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { - size = PyTuple_GET_SIZE(arg); - if (likely(size == 1)) { - PyObject *result, *arg0; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - arg0 = PyTuple_GET_ITEM(arg, 0); - #else - arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; - #endif - result = (*meth)(self, arg0); - #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(arg0); - #endif - return result; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", - f->m_ml->ml_name, size); - return NULL; - } - break; - default: - PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); - return NULL; - } - PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", - f->m_ml->ml_name); - return NULL; -} -static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { - return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); -} -static PyObject
*__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { - PyObject *result; - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; -#if CYTHON_METH_FASTCALL - __pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc); - if (vc) { -#if CYTHON_ASSUME_SAFE_MACROS - return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw); -#else - (void) &__Pyx_PyVectorcall_FastCallDict; - return PyVectorcall_Call(func, args, kw); -#endif - } -#endif - if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { - Py_ssize_t argc; - PyObject *new_args; - PyObject *self; - argc = PyTuple_GET_SIZE(args); - new_args = PyTuple_GetSlice(args, 1, argc); - if (unlikely(!new_args)) - return NULL; - self = PyTuple_GetItem(args, 0); - if (unlikely(!self)) { - Py_DECREF(new_args); -#if PY_MAJOR_VERSION > 2 - PyErr_Format(PyExc_TypeError, - "unbound method %.200S() needs an argument", - cyfunc->func_qualname); -#else - PyErr_SetString(PyExc_TypeError, - "unbound method needs an argument"); -#endif - return NULL; - } - result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); - Py_DECREF(new_args); - } else { - result = __Pyx_CyFunction_Call(func, args, kw); - } - return result; -} -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE int __Pyx_CyFunction_Vectorcall_CheckArgs(__pyx_CyFunctionObject *cyfunc, Py_ssize_t nargs, PyObject *kwnames) -{ - int ret = 0; - if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { - if (unlikely(nargs < 1)) { - PyErr_Format(PyExc_TypeError, "%.200s() needs an argument", - ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); - return -1; - } - ret = 1; - } - if (unlikely(kwnames) && unlikely(PyTuple_GET_SIZE(kwnames))) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes no keyword arguments", ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); - return -1; - } - return ret; -} -static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - if (unlikely(nargs != 0)) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", - def->ml_name, nargs); - return NULL; - } - return def->ml_meth(self, NULL); -} -static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - if (unlikely(nargs != 1)) { - PyErr_Format(PyExc_TypeError, - "%.200s() 
takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", - def->ml_name, nargs); - return NULL; - } - return def->ml_meth(self, args[0]); -} -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - return ((_PyCFunctionFastWithKeywords)(void(*)(void))def->ml_meth)(self, args, nargs, kwnames); -} -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; - PyTypeObject *cls = (PyTypeObject *) __Pyx_CyFunction_GetClassObj(cyfunc); -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - return ((__Pyx_PyCMethod)(void(*)(void))def->ml_meth)(self, cls, args, (size_t)nargs, kwnames); -} -#endif -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_CyFunctionType_slots[] = { - {Py_tp_dealloc, (void *)__Pyx_CyFunction_dealloc}, - {Py_tp_repr, (void *)__Pyx_CyFunction_repr}, - {Py_tp_call, (void *)__Pyx_CyFunction_CallAsMethod}, - {Py_tp_traverse, (void *)__Pyx_CyFunction_traverse}, - {Py_tp_clear, (void *)__Pyx_CyFunction_clear}, - {Py_tp_methods, (void *)__pyx_CyFunction_methods}, - {Py_tp_members, (void *)__pyx_CyFunction_members}, - {Py_tp_getset, (void *)__pyx_CyFunction_getsets}, - {Py_tp_descr_get, (void *)__Pyx_PyMethod_New}, - {0, 0}, -}; -static PyType_Spec __pyx_CyFunctionType_spec = { - __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", - sizeof(__pyx_CyFunctionObject), - 0, -#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR - Py_TPFLAGS_METHOD_DESCRIPTOR | -#endif -#if (defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL) - _Py_TPFLAGS_HAVE_VECTORCALL | -#endif - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, - __pyx_CyFunctionType_slots -}; -#else -static PyTypeObject __pyx_CyFunctionType_type = { - PyVarObject_HEAD_INIT(0, 0) - __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", - sizeof(__pyx_CyFunctionObject), - 0, - (destructor) __Pyx_CyFunction_dealloc, -#if !CYTHON_METH_FASTCALL - 0, -#elif CYTHON_BACKPORT_VECTORCALL - (printfunc)offsetof(__pyx_CyFunctionObject, func_vectorcall), -#else - offsetof(PyCFunctionObject, vectorcall), -#endif - 0, - 0, -#if PY_MAJOR_VERSION < 3 - 0, -#else - 0, -#endif - (reprfunc) __Pyx_CyFunction_repr, - 0, - 0, - 0, - 0, - __Pyx_CyFunction_CallAsMethod, - 0, - 0, - 0, - 0, -#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR - Py_TPFLAGS_METHOD_DESCRIPTOR | -#endif -#ifdef _Py_TPFLAGS_HAVE_VECTORCALL - _Py_TPFLAGS_HAVE_VECTORCALL | -#endif - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, - 0, - 
(traverseproc) __Pyx_CyFunction_traverse, - (inquiry) __Pyx_CyFunction_clear, - 0, -#if PY_VERSION_HEX < 0x030500A0 - offsetof(__pyx_CyFunctionObject, func_weakreflist), -#else - offsetof(PyCFunctionObject, m_weakreflist), -#endif - 0, - 0, - __pyx_CyFunction_methods, - __pyx_CyFunction_members, - __pyx_CyFunction_getsets, - 0, - 0, - __Pyx_PyMethod_New, - 0, - offsetof(__pyx_CyFunctionObject, func_dict), - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, -#if PY_VERSION_HEX >= 0x030400a1 - 0, -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, -#endif -#if __PYX_NEED_TP_PRINT_SLOT - 0, -#endif -#if PY_VERSION_HEX >= 0x030C0000 - 0, -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 - 0, -#endif -}; -#endif -static int __pyx_CyFunction_init(PyObject *module) { -#if CYTHON_USE_TYPE_SPECS - __pyx_CyFunctionType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CyFunctionType_spec, NULL); -#else - CYTHON_UNUSED_VAR(module); - __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); -#endif - if (unlikely(__pyx_CyFunctionType == NULL)) { - return -1; - } - return 0; -} -static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults = PyObject_Malloc(size); - if (unlikely(!m->defaults)) - return PyErr_NoMemory(); - memset(m->defaults, 0, size); - m->defaults_pyobjects = pyobjects; - m->defaults_size = size; - return m->defaults; -} -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults_tuple = tuple; - Py_INCREF(tuple); -} -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults_kwdict = dict; - Py_INCREF(dict); -} -static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->func_annotations = dict; - Py_INCREF(dict); -} - -/* CythonFunction */ -static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, - PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { - PyObject *op = __Pyx_CyFunction_Init( - PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), - ml, flags, qualname, closure, module, globals, code - ); - if (likely(op)) { - PyObject_GC_Track(op); - } - return op; -} - -/* PyObjectLookupSpecial */ -#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error) { - PyObject *res; - PyTypeObject *tp = Py_TYPE(obj); -#if PY_MAJOR_VERSION < 3 - if (unlikely(PyInstance_Check(obj))) - return with_error ? 
__Pyx_PyObject_GetAttrStr(obj, attr_name) : __Pyx_PyObject_GetAttrStrNoError(obj, attr_name); -#endif - res = _PyType_Lookup(tp, attr_name); - if (likely(res)) { - descrgetfunc f = Py_TYPE(res)->tp_descr_get; - if (!f) { - Py_INCREF(res); - } else { - res = f(res, obj, (PyObject *)tp); - } - } else if (with_error) { - PyErr_SetObject(PyExc_AttributeError, attr_name); - } - return res; -} -#endif - -/* Py3ClassCreate */ -static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, - PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { - PyObject *ns; - if (metaclass) { - PyObject *prep = __Pyx_PyObject_GetAttrStrNoError(metaclass, __pyx_n_s_prepare); - if (prep) { - PyObject *pargs[3] = {NULL, name, bases}; - ns = __Pyx_PyObject_FastCallDict(prep, pargs+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, mkw); - Py_DECREF(prep); - } else { - if (unlikely(PyErr_Occurred())) - return NULL; - ns = PyDict_New(); - } - } else { - ns = PyDict_New(); - } - if (unlikely(!ns)) - return NULL; - if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad; -#if PY_VERSION_HEX >= 0x03030000 - if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad; -#else - CYTHON_MAYBE_UNUSED_VAR(qualname); -#endif - if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc) < 0)) goto bad; - return ns; -bad: - Py_DECREF(ns); - return NULL; -} -#if PY_VERSION_HEX < 0x030600A4 && CYTHON_PEP487_INIT_SUBCLASS -static int __Pyx_SetNamesPEP487(PyObject *type_obj) { - PyTypeObject *type = (PyTypeObject*) type_obj; - PyObject *names_to_set, *key, *value, *set_name, *tmp; - Py_ssize_t i = 0; -#if CYTHON_USE_TYPE_SLOTS - names_to_set = PyDict_Copy(type->tp_dict); -#else - { - PyObject *d = PyObject_GetAttr(type_obj, __pyx_n_s_dict); - names_to_set = NULL; - if (likely(d)) { - PyObject *names_to_set = PyDict_New(); - int ret = likely(names_to_set) ? PyDict_Update(names_to_set, d) : -1; - Py_DECREF(d); - if (unlikely(ret < 0)) - Py_CLEAR(names_to_set); - } - } -#endif - if (unlikely(names_to_set == NULL)) - goto bad; - while (PyDict_Next(names_to_set, &i, &key, &value)) { - set_name = __Pyx_PyObject_LookupSpecialNoError(value, __pyx_n_s_set_name); - if (unlikely(set_name != NULL)) { - tmp = __Pyx_PyObject_Call2Args(set_name, type_obj, key); - Py_DECREF(set_name); - if (unlikely(tmp == NULL)) { - __Pyx_TypeName value_type_name = - __Pyx_PyType_GetName(Py_TYPE(value)); - __Pyx_TypeName type_name = __Pyx_PyType_GetName(type); - PyErr_Format(PyExc_RuntimeError, -#if PY_MAJOR_VERSION >= 3 - "Error calling __set_name__ on '" __Pyx_FMT_TYPENAME "' instance %R " "in '" __Pyx_FMT_TYPENAME "'", - value_type_name, key, type_name); -#else - "Error calling __set_name__ on '" __Pyx_FMT_TYPENAME "' instance %.100s in '" __Pyx_FMT_TYPENAME "'", - value_type_name, - PyString_Check(key) ? 
PyString_AS_STRING(key) : "?", - type_name); -#endif - goto bad; - } else { - Py_DECREF(tmp); - } - } - else if (unlikely(PyErr_Occurred())) { - goto bad; - } - } - Py_DECREF(names_to_set); - return 0; -bad: - Py_XDECREF(names_to_set); - return -1; -} -static PyObject *__Pyx_InitSubclassPEP487(PyObject *type_obj, PyObject *mkw) { -#if CYTHON_USE_TYPE_SLOTS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - PyTypeObject *type = (PyTypeObject*) type_obj; - PyObject *mro = type->tp_mro; - Py_ssize_t i, nbases; - if (unlikely(!mro)) goto done; - (void) &__Pyx_GetBuiltinName; - Py_INCREF(mro); - nbases = PyTuple_GET_SIZE(mro); - assert(PyTuple_GET_ITEM(mro, 0) == type_obj); - for (i = 1; i < nbases-1; i++) { - PyObject *base, *dict, *meth; - base = PyTuple_GET_ITEM(mro, i); - dict = ((PyTypeObject *)base)->tp_dict; - meth = __Pyx_PyDict_GetItemStrWithError(dict, __pyx_n_s_init_subclass); - if (unlikely(meth)) { - descrgetfunc f = Py_TYPE(meth)->tp_descr_get; - PyObject *res; - Py_INCREF(meth); - if (likely(f)) { - res = f(meth, NULL, type_obj); - Py_DECREF(meth); - if (unlikely(!res)) goto bad; - meth = res; - } - res = __Pyx_PyObject_FastCallDict(meth, NULL, 0, mkw); - Py_DECREF(meth); - if (unlikely(!res)) goto bad; - Py_DECREF(res); - goto done; - } else if (unlikely(PyErr_Occurred())) { - goto bad; - } - } -done: - Py_XDECREF(mro); - return type_obj; -bad: - Py_XDECREF(mro); - Py_DECREF(type_obj); - return NULL; -#else - PyObject *super_type, *super, *func, *res; -#if CYTHON_COMPILING_IN_PYPY && !defined(PySuper_Type) - super_type = __Pyx_GetBuiltinName(__pyx_n_s_super); -#else - super_type = (PyObject*) &PySuper_Type; - (void) &__Pyx_GetBuiltinName; -#endif - super = likely(super_type) ? __Pyx_PyObject_Call2Args(super_type, type_obj, type_obj) : NULL; -#if CYTHON_COMPILING_IN_PYPY && !defined(PySuper_Type) - Py_XDECREF(super_type); -#endif - if (unlikely(!super)) { - Py_CLEAR(type_obj); - goto done; - } - func = __Pyx_PyObject_GetAttrStrNoError(super, __pyx_n_s_init_subclass); - Py_DECREF(super); - if (likely(!func)) { - if (unlikely(PyErr_Occurred())) - Py_CLEAR(type_obj); - goto done; - } - res = __Pyx_PyObject_FastCallDict(func, NULL, 0, mkw); - Py_DECREF(func); - if (unlikely(!res)) - Py_CLEAR(type_obj); - Py_XDECREF(res); -done: - return type_obj; -#endif -} -#endif -static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, - PyObject *dict, PyObject *mkw, - int calculate_metaclass, int allow_py2_metaclass) { - PyObject *result; - PyObject *owned_metaclass = NULL; - PyObject *margs[4] = {NULL, name, bases, dict}; - if (allow_py2_metaclass) { - owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass); - if (owned_metaclass) { - metaclass = owned_metaclass; - } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { - PyErr_Clear(); - } else { - return NULL; - } - } - if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { - metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); - Py_XDECREF(owned_metaclass); - if (unlikely(!metaclass)) - return NULL; - owned_metaclass = metaclass; - } - result = __Pyx_PyObject_FastCallDict(metaclass, margs+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, -#if PY_VERSION_HEX < 0x030600A4 - (metaclass == (PyObject*)&PyType_Type) ? 
NULL : mkw -#else - mkw -#endif - ); - Py_XDECREF(owned_metaclass); -#if PY_VERSION_HEX < 0x030600A4 && CYTHON_PEP487_INIT_SUBCLASS - if (likely(result) && likely(PyType_Check(result))) { - if (unlikely(__Pyx_SetNamesPEP487(result) < 0)) { - Py_CLEAR(result); - } else { - result = __Pyx_InitSubclassPEP487(result, mkw); - } - } -#else - (void) &__Pyx_GetBuiltinName; -#endif - return result; -} - -/* CLineInTraceback */ -#ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - CYTHON_MAYBE_UNUSED_VAR(tstate); - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStrNoError(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ -#if !CYTHON_COMPILING_IN_LIMITED_API -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - 
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} -#endif - -/* AddTraceback */ -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" -#if PY_VERSION_HEX >= 0x030b00a6 - #ifndef Py_BUILD_CORE - #define Py_BUILD_CORE 1 - #endif - #include "internal/pycore_frame.h" -#endif -#if CYTHON_COMPILING_IN_LIMITED_API -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - if (c_line) { - (void) __pyx_cfilenm; - (void) __Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line); - } - _PyTraceback_Add(funcname, filename, py_line); -} -#else -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = NULL; - PyObject *py_funcname = NULL; - #if PY_MAJOR_VERSION < 3 - PyObject *py_srcfile = NULL; - py_srcfile = PyString_FromString(filename); - if (!py_srcfile) goto bad; - #endif - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - if (!py_funcname) goto bad; - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - if (!py_funcname) goto bad; - funcname = PyUnicode_AsUTF8(py_funcname); - if (!funcname) goto bad; - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - if (!py_funcname) goto bad; - #endif - } - #if PY_MAJOR_VERSION < 3 - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - #else - py_code = PyCode_NewEmpty(filename, funcname, py_line); - #endif - Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline - return py_code; -bad: - Py_XDECREF(py_funcname); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_srcfile); - #endif - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject *ptype, *pvalue, *ptraceback; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? 
-c_line : py_line); - if (!py_code) { - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) { - /* If the code object creation fails, then we should clear the - fetched exception references and propagate the new exception */ - Py_XDECREF(ptype); - Py_XDECREF(pvalue); - Py_XDECREF(ptraceback); - goto bad; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} -#endif - -/* CIntToPy */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* FormatTypeName */ -#if CYTHON_COMPILING_IN_LIMITED_API -static __Pyx_TypeName -__Pyx_PyType_GetName(PyTypeObject* tp) -{ - PyObject *name = __Pyx_PyObject_GetAttrStr((PyObject *)tp, - __pyx_n_s_name_2); - if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) { - PyErr_Clear(); - Py_XSETREF(name, __Pyx_NewRef(__pyx_n_s__51)); - } - return name; -} -#endif - -/* CIntFromPyVerify */ -#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* CIntFromPy */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop 
-#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if ((sizeof(long) < sizeof(long))) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - if (unlikely(__Pyx_PyLong_IsNeg(x))) { - goto raise_neg_overflow; - } else if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) - } else { - const digit* digits = __Pyx_PyLong_Digits(x); - assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_DigitCount(x)) { - case 2: - if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 2 * PyLong_SHIFT)) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 3 * PyLong_SHIFT)) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 4 * PyLong_SHIFT)) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if ((sizeof(long) <= sizeof(unsigned long))) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(long) <= sizeof(unsigned PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) - } else { - const digit* digits = __Pyx_PyLong_Digits(x); - assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_SignedDigitCount(x)) { - case -2: - if ((8 * sizeof(long) - 1 > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - 
} - break; - case 2: - if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } - } -#endif - if ((sizeof(long) <= sizeof(long))) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(long) <= sizeof(PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); -#if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } -#endif - if (likely(v)) { - int ret = -1; -#if !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); -#else - PyObject *stepval = NULL, *mask = NULL, *shift = NULL; - int bits, remaining_bits, is_negative = 0; - long idigit; - int chunk_size = (sizeof(long) < 8) ? 
30 : 62; - if (unlikely(!PyLong_CheckExact(v))) { - PyObject *tmp = v; - v = PyNumber_Long(v); - assert(PyLong_CheckExact(v)); - Py_DECREF(tmp); - if (unlikely(!v)) return (long) -1; - } -#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(x) == 0) - return (long) 0; - is_negative = Py_SIZE(x) < 0; -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - is_negative = result == 1; - } -#endif - if (is_unsigned && unlikely(is_negative)) { - goto raise_neg_overflow; - } else if (is_negative) { - stepval = PyNumber_Invert(v); - if (unlikely(!stepval)) - return (long) -1; - } else { - stepval = __Pyx_NewRef(v); - } - val = (long) 0; - mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; - shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; - for (bits = 0; bits < (int) sizeof(long) * 8 - chunk_size; bits += chunk_size) { - PyObject *tmp, *digit; - digit = PyNumber_And(stepval, mask); - if (unlikely(!digit)) goto done; - idigit = PyLong_AsLong(digit); - Py_DECREF(digit); - if (unlikely(idigit < 0)) goto done; - tmp = PyNumber_Rshift(stepval, shift); - if (unlikely(!tmp)) goto done; - Py_DECREF(stepval); stepval = tmp; - val |= ((long) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(stepval) == 0) - goto unpacking_done; - #endif - } - idigit = PyLong_AsLong(stepval); - if (unlikely(idigit < 0)) goto done; - remaining_bits = ((int) sizeof(long) * 8) - bits - (is_unsigned ? 0 : 1); - if (unlikely(idigit >= (1L << remaining_bits))) - goto raise_overflow; - val |= ((long) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - unpacking_done: - #endif - if (!is_unsigned) { - if (unlikely(val & (((long) 1) << (sizeof(long) * 8 - 1)))) - goto raise_overflow; - if (is_negative) - val = ~val; - } - ret = 0; - done: - Py_XDECREF(shift); - Py_XDECREF(mask); - Py_XDECREF(stepval); -#endif - Py_DECREF(v); - if (likely(!ret)) - return val; - } - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* CIntFromPy */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if ((sizeof(int) < sizeof(long))) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - if (unlikely(__Pyx_PyLong_IsNeg(x))) { - goto raise_neg_overflow; - } else if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) - } else { - const digit* digits = __Pyx_PyLong_Digits(x); - 
assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_DigitCount(x)) { - case 2: - if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 2 * PyLong_SHIFT)) { - return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 3 * PyLong_SHIFT)) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 4 * PyLong_SHIFT)) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if ((sizeof(int) <= sizeof(unsigned long))) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(int) <= sizeof(unsigned PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) - } else { - const digit* digits = __Pyx_PyLong_Digits(x); - assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_SignedDigitCount(x)) { - case -2: - if ((8 * sizeof(int) - 1 > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - return (int) 
(((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } - } -#endif - if ((sizeof(int) <= sizeof(long))) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(int) <= sizeof(PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { - int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); -#if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } -#endif - if (likely(v)) { - int ret = -1; -#if !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); -#else - PyObject *stepval = NULL, *mask = NULL, *shift = NULL; - int bits, remaining_bits, is_negative = 0; - long idigit; - int chunk_size = (sizeof(long) < 8) ? 
30 : 62; - if (unlikely(!PyLong_CheckExact(v))) { - PyObject *tmp = v; - v = PyNumber_Long(v); - assert(PyLong_CheckExact(v)); - Py_DECREF(tmp); - if (unlikely(!v)) return (int) -1; - } -#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(x) == 0) - return (int) 0; - is_negative = Py_SIZE(x) < 0; -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - is_negative = result == 1; - } -#endif - if (is_unsigned && unlikely(is_negative)) { - goto raise_neg_overflow; - } else if (is_negative) { - stepval = PyNumber_Invert(v); - if (unlikely(!stepval)) - return (int) -1; - } else { - stepval = __Pyx_NewRef(v); - } - val = (int) 0; - mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; - shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; - for (bits = 0; bits < (int) sizeof(int) * 8 - chunk_size; bits += chunk_size) { - PyObject *tmp, *digit; - digit = PyNumber_And(stepval, mask); - if (unlikely(!digit)) goto done; - idigit = PyLong_AsLong(digit); - Py_DECREF(digit); - if (unlikely(idigit < 0)) goto done; - tmp = PyNumber_Rshift(stepval, shift); - if (unlikely(!tmp)) goto done; - Py_DECREF(stepval); stepval = tmp; - val |= ((int) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(stepval) == 0) - goto unpacking_done; - #endif - } - idigit = PyLong_AsLong(stepval); - if (unlikely(idigit < 0)) goto done; - remaining_bits = ((int) sizeof(int) * 8) - bits - (is_unsigned ? 0 : 1); - if (unlikely(idigit >= (1L << remaining_bits))) - goto raise_overflow; - val |= ((int) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - unpacking_done: - #endif - if (!is_unsigned) { - if (unlikely(val & (((int) 1) << (sizeof(int) * 8 - 1)))) - goto raise_overflow; - if (is_negative) - val = ~val; - } - ret = 0; - done: - Py_XDECREF(shift); - Py_XDECREF(mask); - Py_XDECREF(stepval); -#endif - Py_DECREF(v); - if (likely(!ret)) - return val; - } - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* CheckBinaryVersion */ -static int __Pyx_check_binary_version(void) { - char ctversion[5]; - int same=1, i, found_dot; - const char* rt_from_call = Py_GetVersion(); - PyOS_snprintf(ctversion, 5, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - found_dot = 0; - for (i = 0; i < 4; i++) { - if (!ctversion[i]) { - same = (rt_from_call[i] < '0' || rt_from_call[i] > '9'); - break; - } - if (rt_from_call[i] != ctversion[i]) { - same = 0; - break; - } - } - if (!same) { - char rtversion[5] = {'\0'}; - char message[200]; - for (i=0; i<4; ++i) { - if (rt_from_call[i] == '.') { - if (found_dot) break; - found_dot = 1; - } else if (rt_from_call[i] < '0' || rt_from_call[i] > '9') { - break; - } - rtversion[i] = rt_from_call[i]; - } - PyOS_snprintf(message, sizeof(message), - "compile time version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - return PyErr_WarnEx(NULL, message, 1); - } - return 0; -} - -/* InitStrings */ -#if PY_MAJOR_VERSION >= 3 -static int 
__Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str) { - if (t.is_unicode | t.is_str) { - if (t.intern) { - *str = PyUnicode_InternFromString(t.s); - } else if (t.encoding) { - *str = PyUnicode_Decode(t.s, t.n - 1, t.encoding, NULL); - } else { - *str = PyUnicode_FromStringAndSize(t.s, t.n - 1); - } - } else { - *str = PyBytes_FromStringAndSize(t.s, t.n - 1); - } - if (!*str) - return -1; - if (PyObject_Hash(*str) == -1) - return -1; - return 0; -} -#endif -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION >= 3 - __Pyx_InitString(*t, t->p); - #else - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - #endif - ++t; - } - return 0; -} - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - PyUnicode_AsASCIIString(o); - return NULL; - } - } - } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY && !CYTHON_COMPILING_IN_LIMITED_API) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - 
return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { - __Pyx_TypeName result_type_name = __Pyx_PyType_GetName(Py_TYPE(result)); -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type " __Pyx_FMT_TYPENAME "). " - "The ability to return an instance of a strict subclass of int is deprecated, " - "and may be removed in a future version of Python.", - result_type_name)) { - __Pyx_DECREF_TypeName(result_type_name); - Py_DECREF(result); - return NULL; - } - __Pyx_DECREF_TypeName(result_type_name); - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type " __Pyx_FMT_TYPENAME ")", - type_name, type_name, result_type_name); - __Pyx_DECREF_TypeName(result_type_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(__Pyx_PyLong_IsCompact(b))) { - return __Pyx_PyLong_CompactValue(b); - } else { - const digit* digits = __Pyx_PyLong_Digits(b); - const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(b); - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * 
sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { - if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { - return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); -#if PY_MAJOR_VERSION < 3 - } else if (likely(PyInt_CheckExact(o))) { - return PyInt_AS_LONG(o); -#endif - } else { - Py_ssize_t ival; - PyObject *x; - x = PyNumber_Index(o); - if (!x) return -1; - ival = PyInt_AsLong(x); - Py_DECREF(x); - return ival; - } -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -/* #### Code section: utility_code_pragmas_end ### */ -#ifdef _MSC_VER -#pragma warning( pop ) -#endif - - - -/* #### Code section: end ### */ -#endif /* Py_PYTHON_H */ diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py deleted file mode 100644 index 324ffd016515f0f96e6505e53ffc5c50b149be49..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py +++ /dev/null @@ -1,1037 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.fixedTools import floatToFixedToStr -from fontTools.misc.textTools import byteord, safeEval - -# from itertools import * -from . import DefaultTable -from . 
import grUtils -from array import array -from functools import reduce -import struct, re, sys - -Silf_hdr_format = """ - > - version: 16.16F -""" - -Silf_hdr_format_3 = """ - > - version: 16.16F - compilerVersion: L - numSilf: H - x - x -""" - -Silf_part1_format_v3 = """ - > - ruleVersion: 16.16F - passOffset: H - pseudosOffset: H -""" - -Silf_part1_format = """ - > - maxGlyphID: H - extraAscent: h - extraDescent: h - numPasses: B - iSubst: B - iPos: B - iJust: B - iBidi: B - flags: B - maxPreContext: B - maxPostContext: B - attrPseudo: B - attrBreakWeight: B - attrDirectionality: B - attrMirroring: B - attrSkipPasses: B - numJLevels: B -""" - -Silf_justify_format = """ - > - attrStretch: B - attrShrink: B - attrStep: B - attrWeight: B - runto: B - x - x - x -""" - -Silf_part2_format = """ - > - numLigComp: H - numUserDefn: B - maxCompPerLig: B - direction: B - attCollisions: B - x - x - x - numCritFeatures: B -""" - -Silf_pseudomap_format = """ - > - unicode: L - nPseudo: H -""" - -Silf_pseudomap_format_h = """ - > - unicode: H - nPseudo: H -""" - -Silf_classmap_format = """ - > - numClass: H - numLinear: H -""" - -Silf_lookupclass_format = """ - > - numIDs: H - searchRange: H - entrySelector: H - rangeShift: H -""" - -Silf_lookuppair_format = """ - > - glyphId: H - index: H -""" - -Silf_pass_format = """ - > - flags: B - maxRuleLoop: B - maxRuleContext: B - maxBackup: B - numRules: H - fsmOffset: H - pcCode: L - rcCode: L - aCode: L - oDebug: L - numRows: H - numTransitional: H - numSuccess: H - numColumns: H -""" - -aCode_info = ( - ("NOP", 0), - ("PUSH_BYTE", "b"), - ("PUSH_BYTE_U", "B"), - ("PUSH_SHORT", ">h"), - ("PUSH_SHORT_U", ">H"), - ("PUSH_LONG", ">L"), - ("ADD", 0), - ("SUB", 0), - ("MUL", 0), - ("DIV", 0), - ("MIN", 0), - ("MAX", 0), - ("NEG", 0), - ("TRUNC8", 0), - ("TRUNC16", 0), - ("COND", 0), - ("AND", 0), # x10 - ("OR", 0), - ("NOT", 0), - ("EQUAL", 0), - ("NOT_EQ", 0), - ("LESS", 0), - ("GTR", 0), - ("LESS_EQ", 0), - ("GTR_EQ", 0), - ("NEXT", 0), - ("NEXT_N", "b"), - ("COPY_NEXT", 0), - ("PUT_GLYPH_8BIT_OBS", "B"), - ("PUT_SUBS_8BIT_OBS", "bBB"), - ("PUT_COPY", "b"), - ("INSERT", 0), - ("DELETE", 0), # x20 - ("ASSOC", -1), - ("CNTXT_ITEM", "bB"), - ("ATTR_SET", "B"), - ("ATTR_ADD", "B"), - ("ATTR_SUB", "B"), - ("ATTR_SET_SLOT", "B"), - ("IATTR_SET_SLOT", "BB"), - ("PUSH_SLOT_ATTR", "Bb"), - ("PUSH_GLYPH_ATTR_OBS", "Bb"), - ("PUSH_GLYPH_METRIC", "Bbb"), - ("PUSH_FEAT", "Bb"), - ("PUSH_ATT_TO_GATTR_OBS", "Bb"), - ("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"), - ("PUSH_ISLOT_ATTR", "Bbb"), - ("PUSH_IGLYPH_ATTR", "Bbb"), - ("POP_RET", 0), # x30 - ("RET_ZERO", 0), - ("RET_TRUE", 0), - ("IATTR_SET", "BB"), - ("IATTR_ADD", "BB"), - ("IATTR_SUB", "BB"), - ("PUSH_PROC_STATE", "B"), - ("PUSH_VERSION", 0), - ("PUT_SUBS", ">bHH"), - ("PUT_SUBS2", 0), - ("PUT_SUBS3", 0), - ("PUT_GLYPH", ">H"), - ("PUSH_GLYPH_ATTR", ">Hb"), - ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"), - ("BITOR", 0), - ("BITAND", 0), - ("BITNOT", 0), # x40 - ("BITSET", ">HH"), - ("SET_FEAT", "Bb"), -) -aCode_map = dict([(x[0], (i, x[1])) for i, x in enumerate(aCode_info)]) - - -def disassemble(aCode): - codelen = len(aCode) - pc = 0 - res = [] - while pc < codelen: - opcode = byteord(aCode[pc : pc + 1]) - if opcode > len(aCode_info): - instr = aCode_info[0] - else: - instr = aCode_info[opcode] - pc += 1 - if instr[1] != 0 and pc >= codelen: - return res - if instr[1] == -1: - count = byteord(aCode[pc]) - fmt = "%dB" % count - pc += 1 - elif instr[1] == 0: - fmt = "" - else: - fmt = instr[1] - if fmt == "": - res.append(instr[0]) - 
continue - parms = struct.unpack_from(fmt, aCode[pc:]) - res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")") - pc += struct.calcsize(fmt) - return res - - -instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?") - - -def assemble(instrs): - res = b"" - for inst in instrs: - m = instre.match(inst) - if not m or not m.group(1) in aCode_map: - continue - opcode, parmfmt = aCode_map[m.group(1)] - res += struct.pack("B", opcode) - if m.group(2): - if parmfmt == 0: - continue - parms = [int(x) for x in re.split(r",\s*", m.group(2))] - if parmfmt == -1: - l = len(parms) - res += struct.pack(("%dB" % (l + 1)), l, *parms) - else: - res += struct.pack(parmfmt, *parms) - return res - - -def writecode(tag, writer, instrs): - writer.begintag(tag) - writer.newline() - for l in disassemble(instrs): - writer.write(l) - writer.newline() - writer.endtag(tag) - writer.newline() - - -def readcode(content): - res = [] - for e in content_string(content).split("\n"): - e = e.strip() - if not len(e): - continue - res.append(e) - return assemble(res) - - -attrs_info = ( - "flags", - "extraAscent", - "extraDescent", - "maxGlyphID", - "numLigComp", - "numUserDefn", - "maxCompPerLig", - "direction", - "lbGID", -) -attrs_passindexes = ("iSubst", "iPos", "iJust", "iBidi") -attrs_contexts = ("maxPreContext", "maxPostContext") -attrs_attributes = ( - "attrPseudo", - "attrBreakWeight", - "attrDirectionality", - "attrMirroring", - "attrSkipPasses", - "attCollisions", -) -pass_attrs_info = ( - "flags", - "maxRuleLoop", - "maxRuleContext", - "maxBackup", - "minRulePreContext", - "maxRulePreContext", - "collisionThreshold", -) -pass_attrs_fsm = ("numRows", "numTransitional", "numSuccess", "numColumns") - - -def writesimple(tag, self, writer, *attrkeys): - attrs = dict([(k, getattr(self, k)) for k in attrkeys]) - writer.simpletag(tag, **attrs) - writer.newline() - - -def getSimple(self, attrs, *attr_list): - for k in attr_list: - if k in attrs: - setattr(self, k, int(safeEval(attrs[k]))) - - -def content_string(contents): - res = "" - for element in contents: - if isinstance(element, tuple): - continue - res += element - return res.strip() - - -def wrapline(writer, dat, length=80): - currline = "" - for d in dat: - if len(currline) > length: - writer.write(currline[:-1]) - writer.newline() - currline = "" - currline += d + " " - if len(currline): - writer.write(currline[:-1]) - writer.newline() - - -class _Object: - pass - - -class table_S__i_l_f(DefaultTable.DefaultTable): - """Silf table support""" - - def __init__(self, tag=None): - DefaultTable.DefaultTable.__init__(self, tag) - self.silfs = [] - - def decompile(self, data, ttFont): - sstruct.unpack2(Silf_hdr_format, data, self) - self.version = float(floatToFixedToStr(self.version, precisionBits=16)) - if self.version >= 5.0: - (data, self.scheme) = grUtils.decompress(data) - sstruct.unpack2(Silf_hdr_format_3, data, self) - base = sstruct.calcsize(Silf_hdr_format_3) - elif self.version < 3.0: - self.numSilf = struct.unpack(">H", data[4:6]) - self.scheme = 0 - self.compilerVersion = 0 - base = 8 - else: - self.scheme = 0 - sstruct.unpack2(Silf_hdr_format_3, data, self) - base = sstruct.calcsize(Silf_hdr_format_3) - - silfoffsets = struct.unpack_from((">%dL" % self.numSilf), data[base:]) - for offset in silfoffsets: - s = Silf() - self.silfs.append(s) - s.decompile(data[offset:], ttFont, self.version) - - def compile(self, ttFont): - self.numSilf = len(self.silfs) - if self.version < 3.0: - hdr = sstruct.pack(Silf_hdr_format, self) - hdr += struct.pack(">HH", 
self.numSilf, 0) - else: - hdr = sstruct.pack(Silf_hdr_format_3, self) - offset = len(hdr) + 4 * self.numSilf - data = b"" - for s in self.silfs: - hdr += struct.pack(">L", offset) - subdata = s.compile(ttFont, self.version) - offset += len(subdata) - data += subdata - if self.version >= 5.0: - return grUtils.compress(self.scheme, hdr + data) - return hdr + data - - def toXML(self, writer, ttFont): - writer.comment("Attributes starting with _ are informative only") - writer.newline() - writer.simpletag( - "version", - version=self.version, - compilerVersion=self.compilerVersion, - compressionScheme=self.scheme, - ) - writer.newline() - for s in self.silfs: - writer.begintag("silf") - writer.newline() - s.toXML(writer, ttFont, self.version) - writer.endtag("silf") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.scheme = int(safeEval(attrs["compressionScheme"])) - self.version = float(safeEval(attrs["version"])) - self.compilerVersion = int(safeEval(attrs["compilerVersion"])) - return - if name == "silf": - s = Silf() - self.silfs.append(s) - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - s.fromXML(tag, attrs, subcontent, ttFont, self.version) - - -class Silf(object): - """A particular Silf subtable""" - - def __init__(self): - self.passes = [] - self.scriptTags = [] - self.critFeatures = [] - self.jLevels = [] - self.pMap = {} - - def decompile(self, data, ttFont, version=2.0): - if version >= 3.0: - _, data = sstruct.unpack2(Silf_part1_format_v3, data, self) - self.ruleVersion = float( - floatToFixedToStr(self.ruleVersion, precisionBits=16) - ) - _, data = sstruct.unpack2(Silf_part1_format, data, self) - for jlevel in range(self.numJLevels): - j, data = sstruct.unpack2(Silf_justify_format, data, _Object()) - self.jLevels.append(j) - _, data = sstruct.unpack2(Silf_part2_format, data, self) - if self.numCritFeatures: - self.critFeatures = struct.unpack_from( - (">%dH" % self.numCritFeatures), data - ) - data = data[self.numCritFeatures * 2 + 1 :] - (numScriptTag,) = struct.unpack_from("B", data) - if numScriptTag: - self.scriptTags = [ - struct.unpack("4s", data[x : x + 4])[0].decode("ascii") - for x in range(1, 1 + 4 * numScriptTag, 4) - ] - data = data[1 + 4 * numScriptTag :] - (self.lbGID,) = struct.unpack(">H", data[:2]) - if self.numPasses: - self.oPasses = struct.unpack( - (">%dL" % (self.numPasses + 1)), data[2 : 6 + 4 * self.numPasses] - ) - data = data[6 + 4 * self.numPasses :] - (numPseudo,) = struct.unpack(">H", data[:2]) - for i in range(numPseudo): - if version >= 3.0: - pseudo = sstruct.unpack( - Silf_pseudomap_format, data[8 + 6 * i : 14 + 6 * i], _Object() - ) - else: - pseudo = sstruct.unpack( - Silf_pseudomap_format_h, data[8 + 4 * i : 12 + 4 * i], _Object() - ) - self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo) - data = data[8 + 6 * numPseudo :] - currpos = ( - sstruct.calcsize(Silf_part1_format) - + sstruct.calcsize(Silf_justify_format) * self.numJLevels - + sstruct.calcsize(Silf_part2_format) - + 2 * self.numCritFeatures - + 1 - + 1 - + 4 * numScriptTag - + 6 - + 4 * self.numPasses - + 8 - + 6 * numPseudo - ) - if version >= 3.0: - currpos += sstruct.calcsize(Silf_part1_format_v3) - self.classes = Classes() - self.classes.decompile(data, ttFont, version) - for i in range(self.numPasses): - p = Pass() - self.passes.append(p) - p.decompile( - data[self.oPasses[i] - currpos : self.oPasses[i + 1] - currpos], - ttFont, - version, - ) - - def 
compile(self, ttFont, version=2.0):
-        self.numPasses = len(self.passes)
-        self.numJLevels = len(self.jLevels)
-        self.numCritFeatures = len(self.critFeatures)
-        numPseudo = len(self.pMap)
-        data = b""
-        if version >= 3.0:
-            hdroffset = sstruct.calcsize(Silf_part1_format_v3)
-        else:
-            hdroffset = 0
-        data += sstruct.pack(Silf_part1_format, self)
-        for j in self.jLevels:
-            data += sstruct.pack(Silf_justify_format, j)
-        data += sstruct.pack(Silf_part2_format, self)
-        if self.numCritFeatures:
-            data += struct.pack((">%dH" % self.numCritFeatures), *self.critFeatures)
-        data += struct.pack("BB", 0, len(self.scriptTags))
-        if len(self.scriptTags):
-            tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags]
-            data += b"".join(tdata)
-        data += struct.pack(">H", self.lbGID)
-        self.passOffset = len(data)
-
-        data1 = grUtils.bininfo(numPseudo, 6)
-        currpos = hdroffset + len(data) + 4 * (self.numPasses + 1)
-        self.pseudosOffset = currpos + len(data1)
-        for u, p in sorted(self.pMap.items()):
-            data1 += struct.pack(
-                (">LH" if version >= 3.0 else ">HH"), u, ttFont.getGlyphID(p)
-            )
-        data1 += self.classes.compile(ttFont, version)
-        currpos += len(data1)
-        data2 = b""
-        datao = b""
-        for i, p in enumerate(self.passes):
-            base = currpos + len(data2)
-            datao += struct.pack(">L", base)
-            data2 += p.compile(ttFont, base, version)
-        datao += struct.pack(">L", currpos + len(data2))
-
-        if version >= 3.0:
-            data3 = sstruct.pack(Silf_part1_format_v3, self)
-        else:
-            data3 = b""
-        return data3 + data + datao + data1 + data2
-
-    def toXML(self, writer, ttFont, version=2.0):
-        if version >= 3.0:
-            writer.simpletag("version", ruleVersion=self.ruleVersion)
-            writer.newline()
-        writesimple("info", self, writer, *attrs_info)
-        writesimple("passindexes", self, writer, *attrs_passindexes)
-        writesimple("contexts", self, writer, *attrs_contexts)
-        writesimple("attributes", self, writer, *attrs_attributes)
-        if len(self.jLevels):
-            writer.begintag("justifications")
-            writer.newline()
-            jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format)
-            for i, j in enumerate(self.jLevels):
-                attrs = dict([(k, getattr(j, k)) for k in jnames])
-                writer.simpletag("justify", **attrs)
-                writer.newline()
-            writer.endtag("justifications")
-            writer.newline()
-        if len(self.critFeatures):
-            writer.begintag("critFeatures")
-            writer.newline()
-            writer.write(" ".join(map(str, self.critFeatures)))
-            writer.newline()
-            writer.endtag("critFeatures")
-            writer.newline()
-        if len(self.scriptTags):
-            writer.begintag("scriptTags")
-            writer.newline()
-            writer.write(" ".join(self.scriptTags))
-            writer.newline()
-            writer.endtag("scriptTags")
-            writer.newline()
-        if self.pMap:
-            writer.begintag("pseudoMap")
-            writer.newline()
-            for k, v in sorted(self.pMap.items()):
-                writer.simpletag("pseudo", unicode=hex(k), pseudo=v)
-                writer.newline()
-            writer.endtag("pseudoMap")
-            writer.newline()
-        self.classes.toXML(writer, ttFont, version)
-        if len(self.passes):
-            writer.begintag("passes")
-            writer.newline()
-            for i, p in enumerate(self.passes):
-                writer.begintag("pass", _index=i)
-                writer.newline()
-                p.toXML(writer, ttFont, version)
-                writer.endtag("pass")
-                writer.newline()
-            writer.endtag("passes")
-            writer.newline()
-
-    def fromXML(self, name, attrs, content, ttFont, version=2.0):
-        if name == "version":
-            self.ruleVersion = float(safeEval(attrs.get("ruleVersion", "0")))
-        if name == "info":
-            getSimple(self, attrs, *attrs_info)
-        elif name == "passindexes":
-            getSimple(self, attrs, *attrs_passindexes)
-        elif name == "contexts":
-
getSimple(self, attrs, *attrs_contexts) - elif name == "attributes": - getSimple(self, attrs, *attrs_attributes) - elif name == "justifications": - for element in content: - if not isinstance(element, tuple): - continue - (tag, attrs, subcontent) = element - if tag == "justify": - j = _Object() - for k, v in attrs.items(): - setattr(j, k, int(v)) - self.jLevels.append(j) - elif name == "critFeatures": - self.critFeatures = [] - element = content_string(content) - self.critFeatures.extend(map(int, element.split())) - elif name == "scriptTags": - self.scriptTags = [] - element = content_string(content) - for n in element.split(): - self.scriptTags.append(n) - elif name == "pseudoMap": - self.pMap = {} - for element in content: - if not isinstance(element, tuple): - continue - (tag, attrs, subcontent) = element - if tag == "pseudo": - k = int(attrs["unicode"], 16) - v = attrs["pseudo"] - self.pMap[k] = v - elif name == "classes": - self.classes = Classes() - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - self.classes.fromXML(tag, attrs, subcontent, ttFont, version) - elif name == "passes": - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - if tag == "pass": - p = Pass() - for e in subcontent: - if not isinstance(e, tuple): - continue - p.fromXML(e[0], e[1], e[2], ttFont, version) - self.passes.append(p) - - -class Classes(object): - def __init__(self): - self.linear = [] - self.nonLinear = [] - - def decompile(self, data, ttFont, version=2.0): - sstruct.unpack2(Silf_classmap_format, data, self) - if version >= 4.0: - oClasses = struct.unpack( - (">%dL" % (self.numClass + 1)), data[4 : 8 + 4 * self.numClass] - ) - else: - oClasses = struct.unpack( - (">%dH" % (self.numClass + 1)), data[4 : 6 + 2 * self.numClass] - ) - for s, e in zip(oClasses[: self.numLinear], oClasses[1 : self.numLinear + 1]): - self.linear.append( - ttFont.getGlyphName(x) - for x in struct.unpack((">%dH" % ((e - s) / 2)), data[s:e]) - ) - for s, e in zip( - oClasses[self.numLinear : self.numClass], - oClasses[self.numLinear + 1 : self.numClass + 1], - ): - nonLinids = [ - struct.unpack(">HH", data[x : x + 4]) for x in range(s + 8, e, 4) - ] - nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids]) - self.nonLinear.append(nonLin) - - def compile(self, ttFont, version=2.0): - data = b"" - oClasses = [] - if version >= 4.0: - offset = 8 + 4 * (len(self.linear) + len(self.nonLinear)) - else: - offset = 6 + 2 * (len(self.linear) + len(self.nonLinear)) - for l in self.linear: - oClasses.append(len(data) + offset) - gs = [ttFont.getGlyphID(x) for x in l] - data += struct.pack((">%dH" % len(l)), *gs) - for l in self.nonLinear: - oClasses.append(len(data) + offset) - gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()] - data += grUtils.bininfo(len(gs)) - data += b"".join([struct.pack(">HH", *x) for x in sorted(gs)]) - oClasses.append(len(data) + offset) - self.numClass = len(oClasses) - 1 - self.numLinear = len(self.linear) - return ( - sstruct.pack(Silf_classmap_format, self) - + struct.pack( - ((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)), *oClasses - ) - + data - ) - - def toXML(self, writer, ttFont, version=2.0): - writer.begintag("classes") - writer.newline() - writer.begintag("linearClasses") - writer.newline() - for i, l in enumerate(self.linear): - writer.begintag("linear", _index=i) - writer.newline() - wrapline(writer, l) - writer.endtag("linear") - writer.newline() - 
writer.endtag("linearClasses") - writer.newline() - writer.begintag("nonLinearClasses") - writer.newline() - for i, l in enumerate(self.nonLinear): - writer.begintag("nonLinear", _index=i + self.numLinear) - writer.newline() - for inp, ind in l.items(): - writer.simpletag("map", glyph=inp, index=ind) - writer.newline() - writer.endtag("nonLinear") - writer.newline() - writer.endtag("nonLinearClasses") - writer.newline() - writer.endtag("classes") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont, version=2.0): - if name == "linearClasses": - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - if tag == "linear": - l = content_string(subcontent).split() - self.linear.append(l) - elif name == "nonLinearClasses": - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - if tag == "nonLinear": - l = {} - for e in subcontent: - if not isinstance(e, tuple): - continue - tag, attrs, subsubcontent = e - if tag == "map": - l[attrs["glyph"]] = int(safeEval(attrs["index"])) - self.nonLinear.append(l) - - -class Pass(object): - def __init__(self): - self.colMap = {} - self.rules = [] - self.rulePreContexts = [] - self.ruleSortKeys = [] - self.ruleConstraints = [] - self.passConstraints = b"" - self.actions = [] - self.stateTrans = [] - self.startStates = [] - - def decompile(self, data, ttFont, version=2.0): - _, data = sstruct.unpack2(Silf_pass_format, data, self) - (numRange, _, _, _) = struct.unpack(">4H", data[:8]) - data = data[8:] - for i in range(numRange): - (first, last, col) = struct.unpack(">3H", data[6 * i : 6 * i + 6]) - for g in range(first, last + 1): - self.colMap[ttFont.getGlyphName(g)] = col - data = data[6 * numRange :] - oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data) - data = data[2 + 2 * self.numSuccess :] - rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data) - self.rules = [rules[s:e] for (s, e) in zip(oRuleMap, oRuleMap[1:])] - data = data[2 * oRuleMap[-1] :] - (self.minRulePreContext, self.maxRulePreContext) = struct.unpack("BB", data[:2]) - numStartStates = self.maxRulePreContext - self.minRulePreContext + 1 - self.startStates = struct.unpack( - (">%dH" % numStartStates), data[2 : 2 + numStartStates * 2] - ) - data = data[2 + numStartStates * 2 :] - self.ruleSortKeys = struct.unpack( - (">%dH" % self.numRules), data[: 2 * self.numRules] - ) - data = data[2 * self.numRules :] - self.rulePreContexts = struct.unpack( - ("%dB" % self.numRules), data[: self.numRules] - ) - data = data[self.numRules :] - (self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3]) - oConstraints = list( - struct.unpack( - (">%dH" % (self.numRules + 1)), data[3 : 5 + self.numRules * 2] - ) - ) - data = data[5 + self.numRules * 2 :] - oActions = list( - struct.unpack((">%dH" % (self.numRules + 1)), data[: 2 + self.numRules * 2]) - ) - data = data[2 * self.numRules + 2 :] - for i in range(self.numTransitional): - a = array( - "H", data[i * self.numColumns * 2 : (i + 1) * self.numColumns * 2] - ) - if sys.byteorder != "big": - a.byteswap() - self.stateTrans.append(a) - data = data[self.numTransitional * self.numColumns * 2 + 1 :] - self.passConstraints = data[:pConstraint] - data = data[pConstraint:] - for i in range(len(oConstraints) - 2, -1, -1): - if oConstraints[i] == 0: - oConstraints[i] = oConstraints[i + 1] - self.ruleConstraints = [ - (data[s:e] if (e - s > 1) else b"") - for (s, e) in zip(oConstraints, oConstraints[1:]) - ] - 
data = data[oConstraints[-1] :] - self.actions = [ - (data[s:e] if (e - s > 1) else "") for (s, e) in zip(oActions, oActions[1:]) - ] - data = data[oActions[-1] :] - # not using debug - - def compile(self, ttFont, base, version=2.0): - # build it all up backwards - oActions = reduce( - lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.actions + [b""], (0, []) - )[1] - oConstraints = reduce( - lambda a, x: (a[0] + len(x), a[1] + [a[0]]), - self.ruleConstraints + [b""], - (1, []), - )[1] - constraintCode = b"\000" + b"".join(self.ruleConstraints) - transes = [] - for t in self.stateTrans: - if sys.byteorder != "big": - t.byteswap() - transes.append(t.tobytes()) - if sys.byteorder != "big": - t.byteswap() - if not len(transes): - self.startStates = [0] - oRuleMap = reduce( - lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.rules + [[]], (0, []) - )[1] - passRanges = [] - gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()]) - for e in grUtils.entries(gidcolmap, sameval=True): - if e[1]: - passRanges.append((e[0], e[0] + e[1] - 1, e[2][0])) - self.numRules = len(self.actions) - self.fsmOffset = ( - sstruct.calcsize(Silf_pass_format) - + 8 - + len(passRanges) * 6 - + len(oRuleMap) * 2 - + 2 * oRuleMap[-1] - + 2 - + 2 * len(self.startStates) - + 3 * self.numRules - + 3 - + 4 * self.numRules - + 4 - ) - self.pcCode = ( - self.fsmOffset + 2 * self.numTransitional * self.numColumns + 1 + base - ) - self.rcCode = self.pcCode + len(self.passConstraints) - self.aCode = self.rcCode + len(constraintCode) - self.oDebug = 0 - # now generate output - data = sstruct.pack(Silf_pass_format, self) - data += grUtils.bininfo(len(passRanges), 6) - data += b"".join(struct.pack(">3H", *p) for p in passRanges) - data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap) - flatrules = reduce(lambda a, x: a + x, self.rules, []) - data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules) - data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext) - data += struct.pack((">%dH" % len(self.startStates)), *self.startStates) - data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys) - data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts) - data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints)) - data += struct.pack((">%dH" % (self.numRules + 1)), *oConstraints) - data += struct.pack((">%dH" % (self.numRules + 1)), *oActions) - return ( - data - + b"".join(transes) - + struct.pack("B", 0) - + self.passConstraints - + constraintCode - + b"".join(self.actions) - ) - - def toXML(self, writer, ttFont, version=2.0): - writesimple("info", self, writer, *pass_attrs_info) - writesimple("fsminfo", self, writer, *pass_attrs_fsm) - writer.begintag("colmap") - writer.newline() - wrapline( - writer, - [ - "{}={}".format(*x) - for x in sorted( - self.colMap.items(), key=lambda x: ttFont.getGlyphID(x[0]) - ) - ], - ) - writer.endtag("colmap") - writer.newline() - writer.begintag("staterulemap") - writer.newline() - for i, r in enumerate(self.rules): - writer.simpletag( - "state", - number=self.numRows - self.numSuccess + i, - rules=" ".join(map(str, r)), - ) - writer.newline() - writer.endtag("staterulemap") - writer.newline() - writer.begintag("rules") - writer.newline() - for i in range(len(self.actions)): - writer.begintag( - "rule", - index=i, - precontext=self.rulePreContexts[i], - sortkey=self.ruleSortKeys[i], - ) - writer.newline() - if len(self.ruleConstraints[i]): - writecode("constraint", writer, self.ruleConstraints[i]) - writecode("action", 
writer, self.actions[i]) - writer.endtag("rule") - writer.newline() - writer.endtag("rules") - writer.newline() - if len(self.passConstraints): - writecode("passConstraint", writer, self.passConstraints) - if len(self.stateTrans): - writer.begintag("fsm") - writer.newline() - writer.begintag("starts") - writer.write(" ".join(map(str, self.startStates))) - writer.endtag("starts") - writer.newline() - for i, s in enumerate(self.stateTrans): - writer.begintag("row", _i=i) - # no newlines here - writer.write(" ".join(map(str, s))) - writer.endtag("row") - writer.newline() - writer.endtag("fsm") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont, version=2.0): - if name == "info": - getSimple(self, attrs, *pass_attrs_info) - elif name == "fsminfo": - getSimple(self, attrs, *pass_attrs_fsm) - elif name == "colmap": - e = content_string(content) - for w in e.split(): - x = w.split("=") - if len(x) != 2 or x[0] == "" or x[1] == "": - continue - self.colMap[x[0]] = int(x[1]) - elif name == "staterulemap": - for e in content: - if not isinstance(e, tuple): - continue - tag, a, c = e - if tag == "state": - self.rules.append([int(x) for x in a["rules"].split(" ")]) - elif name == "rules": - for element in content: - if not isinstance(element, tuple): - continue - tag, a, c = element - if tag != "rule": - continue - self.rulePreContexts.append(int(a["precontext"])) - self.ruleSortKeys.append(int(a["sortkey"])) - con = b"" - act = b"" - for e in c: - if not isinstance(e, tuple): - continue - tag, a, subc = e - if tag == "constraint": - con = readcode(subc) - elif tag == "action": - act = readcode(subc) - self.actions.append(act) - self.ruleConstraints.append(con) - elif name == "passConstraint": - self.passConstraints = readcode(content) - elif name == "fsm": - for element in content: - if not isinstance(element, tuple): - continue - tag, a, c = element - if tag == "row": - s = array("H") - e = content_string(c) - s.extend(map(int, e.split())) - self.stateTrans.append(s) - elif tag == "starts": - s = [] - e = content_string(c) - s.extend(map(int, e.split())) - self.startStates = s diff --git a/spaces/declare-lab/tango/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py b/spaces/declare-lab/tango/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py deleted file mode 100644 index 780abf304a469ddefbe35d5f5132367fe3c8213d..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py +++ /dev/null @@ -1,178 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
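-
-# The fast tests below assemble a miniature UNet + VAE + CLIP text-encoder stack so the
-# Attend-and-Excite pipeline can run end-to-end on CPU in seconds; the integration tests
-# at the bottom of the file are gated behind @slow / @require_torch_gpu and download the
-# real Stable Diffusion checkpoint.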
-
-import gc
-import unittest
-
-import numpy as np
-import torch
-from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
-
-from diffusers import (
-    AutoencoderKL,
-    DDIMScheduler,
-    StableDiffusionAttendAndExcitePipeline,
-    UNet2DConditionModel,
-)
-from diffusers.utils import load_numpy, skip_mps, slow
-from diffusers.utils.testing_utils import require_torch_gpu
-
-from ...pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
-
-
-@skip_mps
-class StableDiffusionAttendAndExcitePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
-    pipeline_class = StableDiffusionAttendAndExcitePipeline
-    test_attention_slicing = False
-    params = TEXT_TO_IMAGE_PARAMS
-    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
-
-    def get_dummy_components(self):
-        torch.manual_seed(0)
-        unet = UNet2DConditionModel(
-            block_out_channels=(32, 64),
-            layers_per_block=2,
-            sample_size=32,
-            in_channels=4,
-            out_channels=4,
-            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
-            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
-            cross_attention_dim=32,
-            # SD2-specific config below
-            attention_head_dim=(2, 4),
-            use_linear_projection=True,
-        )
-        scheduler = DDIMScheduler(
-            beta_start=0.00085,
-            beta_end=0.012,
-            beta_schedule="scaled_linear",
-            clip_sample=False,
-            set_alpha_to_one=False,
-        )
-        torch.manual_seed(0)
-        vae = AutoencoderKL(
-            block_out_channels=[32, 64],
-            in_channels=3,
-            out_channels=3,
-            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
-            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
-            latent_channels=4,
-            sample_size=128,
-        )
-        torch.manual_seed(0)
-        text_encoder_config = CLIPTextConfig(
-            bos_token_id=0,
-            eos_token_id=2,
-            hidden_size=32,
-            intermediate_size=37,
-            layer_norm_eps=1e-05,
-            num_attention_heads=4,
-            num_hidden_layers=5,
-            pad_token_id=1,
-            vocab_size=1000,
-            # SD2-specific config below
-            hidden_act="gelu",
-            projection_dim=512,
-        )
-        text_encoder = CLIPTextModel(text_encoder_config)
-        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
-        components = {
-            "unet": unet,
-            "scheduler": scheduler,
-            "vae": vae,
-            "text_encoder": text_encoder,
-            "tokenizer": tokenizer,
-            "safety_checker": None,
-            "feature_extractor": None,
-        }
-
-        return components
-
-    def get_dummy_inputs(self, device, seed=0):
-        if str(device).startswith("mps"):
-            generator = torch.manual_seed(seed)
-        else:
-            generator = torch.Generator(device=device).manual_seed(seed)
-        inputs = {
-            "prompt": "a cat and a frog",
-            "token_indices": [2, 5],
-            "generator": generator,
-            "num_inference_steps": 2,
-            "guidance_scale": 6.0,
-            "output_type": "numpy",
-            "max_iter_to_alter": 2,
-            "thresholds": {0: 0.7},
-        }
-        return inputs
-
-    def test_inference(self):
-        device = "cpu"
-
-        components = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe.to(device)
-        pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(device)
-        image = pipe(**inputs).images
-        image_slice = image[0, -3:, -3:, -1]
-
-        self.assertEqual(image.shape, (1, 64, 64, 3))
-        expected_slice = np.array(
-            [0.5644937, 0.60543084, 0.48239064, 0.5206757, 0.55623394, 0.46045133, 0.5100435, 0.48919064, 0.4759359]
-        )
-        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
-        self.assertLessEqual(max_diff, 1e-3)
-
-    def test_inference_batch_consistent(self):
-        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
-        self._test_inference_batch_consistent(batch_sizes=[2, 4])
-
-
-@require_torch_gpu
-@slow
-class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
-    def tearDown(self):
-        super().tearDown()
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    def test_attend_and_excite_fp16(self):
-        generator = torch.manual_seed(51)
-
-        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
-        )
-        pipe.to("cuda")
-
-        prompt = "a painting of an elephant with glasses"
-        token_indices = [5, 7]
-
-        image = pipe(
-            prompt=prompt,
-            token_indices=token_indices,
-            guidance_scale=7.5,
-            generator=generator,
-            num_inference_steps=5,
-            max_iter_to_alter=5,
-            output_type="numpy",
-        ).images[0]
-
-        expected_image = load_numpy(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
-        )
-        assert np.abs((expected_image - image).max()) < 5e-1
diff --git a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/ldm.py b/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/ldm.py
deleted file mode 100644
index b0392e28404c315e5d8ca5ede571da386f5d4b42..0000000000000000000000000000000000000000
--- a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/ldm.py
+++ /dev/null
@@ -1,715 +0,0 @@
-import os
-
-import torch
-import numpy as np
-from tqdm import tqdm
-from audioldm.utils import default, instantiate_from_config, save_wave
-from audioldm.latent_diffusion.ddpm import DDPM
-from audioldm.variational_autoencoder.distributions import DiagonalGaussianDistribution
-from audioldm.latent_diffusion.util import noise_like
-from audioldm.latent_diffusion.ddim import DDIMSampler
-
-def disabled_train(self, mode=True):
-    """Overwrite model.train with this function to make sure train/eval mode
-    does not change anymore."""
-    return self
-
-class LatentDiffusion(DDPM):
-    """main class"""
-
-    def __init__(
-        self,
-        device="cuda",
-        first_stage_config=None,
-        cond_stage_config=None,
-        num_timesteps_cond=None,
-        cond_stage_key="image",
-        cond_stage_trainable=False,
-        concat_mode=True,
-        cond_stage_forward=None,
-        conditioning_key=None,
-        scale_factor=1.0,
-        scale_by_std=False,
-        base_learning_rate=None,
-        *args,
-        **kwargs,
-    ):
-        self.device = device
-        self.learning_rate = base_learning_rate
-        self.num_timesteps_cond = default(num_timesteps_cond, 1)
-        self.scale_by_std = scale_by_std
-        assert self.num_timesteps_cond <= kwargs["timesteps"]
-        # for backwards compatibility after implementation of DiffusionWrapper
-        if conditioning_key is None:
-            conditioning_key = "concat" if concat_mode else "crossattn"
-        if cond_stage_config == "__is_unconditional__":
-            conditioning_key = None
-        ckpt_path = kwargs.pop("ckpt_path", None)
-        ignore_keys = kwargs.pop("ignore_keys", [])
-        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
-        self.concat_mode = concat_mode
-        self.cond_stage_trainable = cond_stage_trainable
-        self.cond_stage_key = cond_stage_key
-        self.cond_stage_key_orig = cond_stage_key
-        try:
-            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
-        except Exception:
-            self.num_downs = 0
-        if not scale_by_std:
-            self.scale_factor = scale_factor
-        else:
-            self.register_buffer("scale_factor", torch.tensor(scale_factor))
-        self.instantiate_first_stage(first_stage_config)
-        self.instantiate_cond_stage(cond_stage_config)
-        self.cond_stage_forward = cond_stage_forward
-        self.clip_denoised = False
-
-    def 
make_cond_schedule( - self, - ): - self.cond_ids = torch.full( - size=(self.num_timesteps,), - fill_value=self.num_timesteps - 1, - dtype=torch.long, - ) - ids = torch.round( - torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) - ).long() - self.cond_ids[: self.num_timesteps_cond] = ids - - def register_schedule( - self, - given_betas=None, - beta_schedule="linear", - timesteps=1000, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - ): - super().register_schedule( - given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s - ) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == "__is_first_stage__": - print("Using first stage also as cond stage.") - self.cond_stage_model = self.first_stage_model - elif config == "__is_unconditional__": - print(f"Training {self.__class__.__name__} as an unconditional model.") - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != "__is_first_stage__" - assert config != "__is_unconditional__" - model = instantiate_from_config(config) - self.cond_stage_model = model - self.cond_stage_model = self.cond_stage_model.to(self.device) - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError( - f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" - ) - return self.scale_factor * z - - def get_learned_conditioning(self, c): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, "encode") and callable( - self.cond_stage_model.encode - ): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - if len(c) == 1: - c = self.cond_stage_model([c[0], c[0]]) - c = c[0:1] - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - @torch.no_grad() - def get_input( - self, - batch, - k, - return_first_stage_encode=True, - return_first_stage_outputs=False, - force_c_encode=False, - cond_key=None, - return_original_cond=False, - bs=None, - ): - x = super().get_input(batch, k) - - if bs is not None: - x = x[:bs] - - x = x.to(self.device) - - if return_first_stage_encode: - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - else: - z = None - - if self.model.conditioning_key is not None: - if cond_key is None: - cond_key = self.cond_stage_key - if cond_key != self.first_stage_key: - if cond_key in ["caption", "coordinates_bbox"]: - xc = batch[cond_key] - elif cond_key == "class_label": - xc = batch - else: - # [bs, 1, 527] - xc = super().get_input(batch, cond_key) - if type(xc) 
== torch.Tensor:
-                        xc = xc.to(self.device)
-            else:
-                xc = x
-            if not self.cond_stage_trainable or force_c_encode:
-                if isinstance(xc, dict) or isinstance(xc, list):
-                    c = self.get_learned_conditioning(xc)
-                else:
-                    c = self.get_learned_conditioning(xc.to(self.device))
-            else:
-                c = xc
-
-            if bs is not None:
-                c = c[:bs]
-
-        else:
-            c = None
-            xc = None
-            if self.use_positional_encodings:
-                pos_x, pos_y = self.compute_latent_shifts(batch)
-                c = {"pos_x": pos_x, "pos_y": pos_y}
-        out = [z, c]
-        if return_first_stage_outputs:
-            xrec = self.decode_first_stage(z)
-            out.extend([x, xrec])
-        if return_original_cond:
-            out.append(xc)
-        return out
-
-    @torch.no_grad()
-    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
-        if predict_cids:
-            from einops import rearrange  # local import: einops is only needed on this branch
-            if z.dim() == 4:
-                z = torch.argmax(z.exp(), dim=1).long()
-            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
-            z = rearrange(z, "b h w c -> b c h w").contiguous()
-
-        z = 1.0 / self.scale_factor * z
-        return self.first_stage_model.decode(z)
-
-    def mel_spectrogram_to_waveform(self, mel):
-        # Mel: [bs, 1, t-steps, fbins]
-        if len(mel.size()) == 4:
-            mel = mel.squeeze(1)
-        mel = mel.permute(0, 2, 1)
-        waveform = self.first_stage_model.vocoder(mel)
-        waveform = waveform.cpu().detach().numpy()
-        return waveform
-
-    @torch.no_grad()
-    def encode_first_stage(self, x):
-        return self.first_stage_model.encode(x)
-
-    def apply_model(self, x_noisy, t, cond, return_ids=False):
-
-        if isinstance(cond, dict):
-            # hybrid case, cond is expected to be a dict
-            pass
-        else:
-            if not isinstance(cond, list):
-                cond = [cond]
-            if self.model.conditioning_key == "concat":
-                key = "c_concat"
-            elif self.model.conditioning_key == "crossattn":
-                key = "c_crossattn"
-            else:
-                key = "c_film"
-
-            cond = {key: cond}
-
-        x_recon = self.model(x_noisy, t, **cond)
-
-        if isinstance(x_recon, tuple) and not return_ids:
-            return x_recon[0]
-        else:
-            return x_recon
-
-    def p_mean_variance(
-        self,
-        x,
-        c,
-        t,
-        clip_denoised: bool,
-        return_codebook_ids=False,
-        quantize_denoised=False,
-        return_x0=False,
-        score_corrector=None,
-        corrector_kwargs=None,
-    ):
-        t_in = t
-        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
-
-        if score_corrector is not None:
-            assert self.parameterization == "eps"
-            model_out = score_corrector.modify_score(
-                self, model_out, x, t, c, **corrector_kwargs
-            )
-
-        if return_codebook_ids:
-            model_out, logits = model_out
-
-        if self.parameterization == "eps":
-            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
-        elif self.parameterization == "x0":
-            x_recon = model_out
-        else:
-            raise NotImplementedError()
-
-        if clip_denoised:
-            x_recon.clamp_(-1.0, 1.0)
-        if quantize_denoised:
-            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
-        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
-            x_start=x_recon, x_t=x, t=t
-        )
-        if return_codebook_ids:
-            return model_mean, posterior_variance, posterior_log_variance, logits
-        elif return_x0:
-            return model_mean, posterior_variance, posterior_log_variance, x_recon
-        else:
-            return model_mean, posterior_variance, posterior_log_variance
-
-    @torch.no_grad()
-    def p_sample(
-        self,
-        x,
-        c,
-        t,
-        clip_denoised=False,
-        repeat_noise=False,
-        return_codebook_ids=False,
-        quantize_denoised=False,
-        return_x0=False,
-        temperature=1.0,
-        noise_dropout=0.0,
-        score_corrector=None,
-        corrector_kwargs=None,
-    ):
-        b, *_, device = *x.shape, x.device
-        outputs = self.p_mean_variance(
-            x=x,
-            c=c,
-            t=t,
-
clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - ) - if return_codebook_ids: - raise DeprecationWarning("Support dropped.") - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.0: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = ( - (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))).contiguous() - ) - - if return_codebook_ids: - return model_mean + nonzero_mask * ( - 0.5 * model_log_variance - ).exp() * noise, logits.argmax(dim=1) - if return_x0: - return ( - model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, - x0, - ) - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def progressive_denoising( - self, - cond, - shape, - verbose=True, - callback=None, - quantize_denoised=False, - img_callback=None, - mask=None, - x0=None, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - batch_size=None, - x_T=None, - start_T=None, - log_every_t=None, - ): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, dict): - cond = { - key: cond[key][:batch_size] - if not isinstance(cond[key], list) - else list(map(lambda x: x[:batch_size], cond[key])) - for key in cond - } - else: - cond = ( - [c[:batch_size] for c in cond] - if isinstance(cond, list) - else cond[:batch_size] - ) - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = ( - tqdm( - reversed(range(0, timesteps)), - desc="Progressive Generation", - total=timesteps, - ) - if verbose - else reversed(range(0, timesteps)) - ) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != "hybrid" - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img, x0_partial = self.p_sample( - img, - cond, - ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, - return_x0=True, - temperature=temperature[i], - noise_dropout=noise_dropout, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - ) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1.0 - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: - callback(i) - if img_callback: - img_callback(img, i) - return img, intermediates - - @torch.no_grad() - def p_sample_loop( - self, - cond, - shape, - return_intermediates=False, - x_T=None, - verbose=True, - callback=None, - timesteps=None, - quantize_denoised=False, - mask=None, - x0=None, - img_callback=None, - start_T=None, - log_every_t=None, - ): - - if not 
log_every_t:
-            log_every_t = self.log_every_t
-        device = self.betas.device
-        b = shape[0]
-        if x_T is None:
-            img = torch.randn(shape, device=device)
-        else:
-            img = x_T
-
-        intermediates = [img]
-        if timesteps is None:
-            timesteps = self.num_timesteps
-
-        if start_T is not None:
-            timesteps = min(timesteps, start_T)
-        iterator = (
-            tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps)
-            if verbose
-            else reversed(range(0, timesteps))
-        )
-
-        if mask is not None:
-            assert x0 is not None
-            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match
-
-        for i in iterator:
-            ts = torch.full((b,), i, device=device, dtype=torch.long)
-            if self.shorten_cond_schedule:
-                assert self.model.conditioning_key != "hybrid"
-                tc = self.cond_ids[ts].to(cond.device)
-                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
-            img = self.p_sample(
-                img,
-                cond,
-                ts,
-                clip_denoised=self.clip_denoised,
-                quantize_denoised=quantize_denoised,
-            )
-            if mask is not None:
-                img_orig = self.q_sample(x0, ts)
-                img = img_orig * mask + (1.0 - mask) * img
-
-            if i % log_every_t == 0 or i == timesteps - 1:
-                intermediates.append(img)
-            if callback:
-                callback(i)
-            if img_callback:
-                img_callback(img, i)
-
-        if return_intermediates:
-            return img, intermediates
-        return img
-
-    @torch.no_grad()
-    def sample(
-        self,
-        cond,
-        batch_size=16,
-        return_intermediates=False,
-        x_T=None,
-        verbose=True,
-        timesteps=None,
-        quantize_denoised=False,
-        mask=None,
-        x0=None,
-        shape=None,
-        **kwargs,
-    ):
-        if shape is None:
-            shape = (batch_size, self.channels, self.latent_t_size, self.latent_f_size)
-        if cond is not None:
-            if isinstance(cond, dict):
-                cond = {
-                    key: cond[key][:batch_size]
-                    if not isinstance(cond[key], list)
-                    else list(map(lambda x: x[:batch_size], cond[key]))
-                    for key in cond
-                }
-            else:
-                cond = (
-                    [c[:batch_size] for c in cond]
-                    if isinstance(cond, list)
-                    else cond[:batch_size]
-                )
-        return self.p_sample_loop(
-            cond,
-            shape,
-            return_intermediates=return_intermediates,
-            x_T=x_T,
-            verbose=verbose,
-            timesteps=timesteps,
-            quantize_denoised=quantize_denoised,
-            mask=mask,
-            x0=x0,
-            **kwargs,
-        )
-
-    @torch.no_grad()
-    def sample_log(
-        self,
-        cond,
-        batch_size,
-        ddim,
-        ddim_steps,
-        unconditional_guidance_scale=1.0,
-        unconditional_conditioning=None,
-        use_plms=False,
-        mask=None,
-        **kwargs,
-    ):
-
-        if mask is not None:
-            shape = (self.channels, mask.size()[-2], mask.size()[-1])
-        else:
-            shape = (self.channels, self.latent_t_size, self.latent_f_size)
-
-        if ddim and not use_plms:
-            # print("Use ddim sampler")
-
-            ddim_sampler = DDIMSampler(self)
-            samples, intermediates = ddim_sampler.sample(
-                ddim_steps,
-                batch_size,
-                shape,
-                cond,
-                verbose=False,
-                unconditional_guidance_scale=unconditional_guidance_scale,
-                unconditional_conditioning=unconditional_conditioning,
-                mask=mask,
-                **kwargs,
-            )
-
-        else:
-            # print("Use DDPM sampler")
-            samples, intermediates = self.sample(
-                cond=cond,
-                batch_size=batch_size,
-                return_intermediates=True,
-                unconditional_guidance_scale=unconditional_guidance_scale,
-                mask=mask,
-                unconditional_conditioning=unconditional_conditioning,
-                **kwargs,
-            )
-
-        return samples, intermediates
-
-
-    @torch.no_grad()
-    def generate_sample(
-        self,
-        batchs,
-        ddim_steps=200,
-        ddim_eta=1.0,
-        x_T=None,
-        n_candidate_gen_per_text=1,
-        unconditional_guidance_scale=1.0,
-        unconditional_conditioning=None,
-        name="waveform",
-        use_plms=False,
-        save=False,
-        **kwargs,
-    ):
-        # Generate n_candidate_gen_per_text times and select the best
-        # Batch: audio, text, fnames
-        assert x_T is None
-        try:
-            batchs = iter(batchs)
-        except TypeError:
-            raise ValueError("The first input argument should be an iterable object")
-
-        if use_plms:
-            assert ddim_steps is not None
-        use_ddim = ddim_steps is not None
-        # waveform_save_path = os.path.join(self.get_log_dir(), name)
-        # os.makedirs(waveform_save_path, exist_ok=True)
-        # print("Waveform save path: ", waveform_save_path)
-
-        with self.ema_scope("Generate"):
-            for batch in batchs:
-                z, c = self.get_input(
-                    batch,
-                    self.first_stage_key,
-                    return_first_stage_outputs=False,
-                    force_c_encode=True,
-                    return_original_cond=False,
-                    bs=None,
-                )
-                text = super().get_input(batch, "text")
-
-                # Generate multiple samples
-                batch_size = z.shape[0] * n_candidate_gen_per_text
-                c = torch.cat([c] * n_candidate_gen_per_text, dim=0)
-                text = text * n_candidate_gen_per_text
-
-                if unconditional_guidance_scale != 1.0:
-                    unconditional_conditioning = (
-                        self.cond_stage_model.get_unconditional_condition(batch_size)
-                    )
-
-                samples, _ = self.sample_log(
-                    cond=c,
-                    batch_size=batch_size,
-                    x_T=x_T,
-                    ddim=use_ddim,
-                    ddim_steps=ddim_steps,
-                    eta=ddim_eta,
-                    unconditional_guidance_scale=unconditional_guidance_scale,
-                    unconditional_conditioning=unconditional_conditioning,
-                    use_plms=use_plms,
-                )
-
-                mel = self.decode_first_stage(samples)
-
-                waveform = self.mel_spectrogram_to_waveform(mel)
-
-                if waveform.shape[0] > 1:
-                    similarity = self.cond_stage_model.cos_similarity(
-                        torch.FloatTensor(waveform).squeeze(1), text
-                    )
-
-                    best_index = []
-                    for i in range(z.shape[0]):
-                        candidates = similarity[i :: z.shape[0]]
-                        max_index = torch.argmax(candidates).item()
-                        best_index.append(i + max_index * z.shape[0])
-
-                    waveform = waveform[best_index]
-                    # print("Similarity between generated audio and text", similarity)
-                    # print("Choose the following indexes:", best_index)
-
-        return waveform
diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/facerender/sync_batchnorm/comm.py b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/facerender/sync_batchnorm/comm.py
deleted file mode 100644
index 922f8c4a3adaa9b32fdcaef09583be03b0d7eb2b..0000000000000000000000000000000000000000
--- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/facerender/sync_batchnorm/comm.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# -*- coding: utf-8 -*-
-# File : comm.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
-# Distributed under MIT License.
-
-import queue
-import collections
-import threading
-
-__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
-
-
-class FutureResult(object):
-    """A thread-safe future implementation. Used only as a one-to-one pipe."""
-
-    def __init__(self):
-        self._result = None
-        self._lock = threading.Lock()
-        self._cond = threading.Condition(self._lock)
-
-    def put(self, result):
-        with self._lock:
-            assert self._result is None, 'Previous result hasn\'t been fetched.'
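-            # Publish the result and wake the consumer blocked in get(); the condition
-            # variable shares self._lock, which is already held at this point.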
-            self._result = result
-            self._cond.notify()
-
-    def get(self):
-        with self._lock:
-            if self._result is None:
-                self._cond.wait()
-
-            res = self._result
-            self._result = None
-            return res
-
-
-_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
-_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
-
-
-class SlavePipe(_SlavePipeBase):
-    """Pipe for master-slave communication."""
-
-    def run_slave(self, msg):
-        self.queue.put((self.identifier, msg))
-        ret = self.result.get()
-        self.queue.put(True)
-        return ret
-
-
-class SyncMaster(object):
-    """An abstract `SyncMaster` object.
-
-    - During the replication, as the data parallel will trigger a callback on each module, all slave devices should
-    call `register_slave(id)` and obtain a `SlavePipe` to communicate with the master.
-    - During the forward pass, the master device invokes `run_master`; all messages from slave devices will be
-    collected and passed to a registered callback.
-    - After receiving the messages, the master device should gather the information and determine the message to be
-    passed back to each slave device.
-    """
-
-    def __init__(self, master_callback):
-        """
-
-        Args:
-            master_callback: a callback to be invoked after having collected messages from slave devices.
-        """
-        self._master_callback = master_callback
-        self._queue = queue.Queue()
-        self._registry = collections.OrderedDict()
-        self._activated = False
-
-    def __getstate__(self):
-        return {'master_callback': self._master_callback}
-
-    def __setstate__(self, state):
-        self.__init__(state['master_callback'])
-
-    def register_slave(self, identifier):
-        """
-        Register a slave device.
-
-        Args:
-            identifier: an identifier, usually the device id.
-
-        Returns: a `SlavePipe` object which can be used to communicate with the master device.
-
-        """
-        if self._activated:
-            assert self._queue.empty(), 'Queue is not clean before next initialization.'
-            self._activated = False
-            self._registry.clear()
-        future = FutureResult()
-        self._registry[identifier] = _MasterRegistry(future)
-        return SlavePipe(identifier, self._queue, future)
-
-    def run_master(self, master_msg):
-        """
-        Main entry for the master device in each forward pass.
-        The messages are first collected from each device (including the master device), and then
-        a callback is invoked to compute the message to be sent back to each device
-        (including the master device).
-
-        Args:
-            master_msg: the message that the master wants to send to itself. This will be placed as the first
-            message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
-
-        Returns: the message to be sent back to the master device.
-
-        """
-        self._activated = True
-
-        intermediates = [(0, master_msg)]
-        for i in range(self.nr_slaves):
-            intermediates.append(self._queue.get())
-
-        results = self._master_callback(intermediates)
-        assert results[0][0] == 0, 'The first result should belong to the master.'
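-
-        # `results` is a list of (identifier, message) pairs produced by the master callback;
-        # entry 0 is the master's own answer, and every other entry is pushed back through the
-        # matching slave's FutureResult pipe. Typical wiring (a sketch, not from this file):
-        # each replica calls pipe = master.register_slave(device_id); replica 0 then calls
-        # master.run_master(msg) while the other replicas call pipe.run_slave(msg).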
-
-        for i, res in results:
-            if i == 0:
-                continue
-            self._registry[i].result.put(res)
-
-        for i in range(self.nr_slaves):
-            assert self._queue.get() is True
-
-        return results[0][1]
-
-    @property
-    def nr_slaves(self):
-        return len(self._registry)
diff --git a/spaces/deepwisdom/MetaGPT/metagpt/manager.py b/spaces/deepwisdom/MetaGPT/metagpt/manager.py
deleted file mode 100644
index 9d238c6215b9fedce19a76d268c7d54063a6c224..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/metagpt/manager.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/11 14:42
-@Author : alexanderwu
-@File : manager.py
-"""
-from metagpt.llm import LLM
-from metagpt.logs import logger
-from metagpt.schema import Message
-
-
-class Manager:
-    def __init__(self, llm: LLM = LLM()):
-        self.llm = llm  # Large Language Model
-        self.role_directions = {
-            "BOSS": "Product Manager",
-            "Product Manager": "Architect",
-            "Architect": "Engineer",
-            "Engineer": "QA Engineer",
-            "QA Engineer": "Product Manager"
-        }
-        self.prompt_template = """
-        Given the following message:
-        {message}
-
-        And the current status of roles:
-        {roles}
-
-        Which role should handle this message?
-        """
-
-    async def handle(self, message: Message, environment):
-        """
-        The administrator processes the information; for now it simply passes the message on to the next role.
-        :param message:
-        :param environment:
-        :return:
-        """
-        # Get all roles from the environment
-        roles = environment.get_roles()
-        # logger.debug(f"{roles=}, {message=}")
-
-        # Build a context for the LLM to understand the situation
-        # context = {
-        #     "message": str(message),
-        #     "roles": {role.name: role.get_info() for role in roles},
-        # }
-        # Ask the LLM to decide which role should handle the message
-        # chosen_role_name = self.llm.ask(self.prompt_template.format(context))
-
-        # FIXME: The direction of flow is determined by a simple dictionary for now,
-        # but there should eventually be a real reasoning step here.
-        next_role_profile = self.role_directions[message.role]
-        # logger.debug(f"{next_role_profile}")
-        for _, role in roles.items():
-            if next_role_profile == role.profile:
-                next_role = role
-                break
-        else:
-            logger.error(f"No available role can handle message: {message}.")
-            return
-
-        # Find the chosen role and handle the message
-        return await next_role.handle(message)
diff --git a/spaces/deepwisdom/MetaGPT/metagpt/provider/base_gpt_api.py b/spaces/deepwisdom/MetaGPT/metagpt/provider/base_gpt_api.py
deleted file mode 100644
index 7351e69168e100914631776cdc2f0103b6e50bf2..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/metagpt/provider/base_gpt_api.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/5 23:04
-@Author : alexanderwu
-@File : base_gpt_api.py
-@Desc : mashenquan, 2023/8/22. + try catch
-"""
-from abc import abstractmethod
-from typing import Optional
-
-from metagpt.logs import logger
-from metagpt.provider.base_chatbot import BaseChatbot
-
-
-class BaseGPTAPI(BaseChatbot):
-    """GPT API abstract class, requiring all inheritors to provide a series of standard capabilities"""
-
-    system_prompt = "You are a helpful assistant."
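-
-    # The _*_msg helpers below wrap plain strings into OpenAI-style chat message dicts;
-    # ask()/aask() prepend the default system prompt, hand the conversation to the
-    # provider's completion implementation, and unwrap the text of the first choice.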
-
-    def _user_msg(self, msg: str) -> dict[str, str]:
-        return {"role": "user", "content": msg}
-
-    def _assistant_msg(self, msg: str) -> dict[str, str]:
-        return {"role": "assistant", "content": msg}
-
-    def _system_msg(self, msg: str) -> dict[str, str]:
-        return {"role": "system", "content": msg}
-
-    def _system_msgs(self, msgs: list[str]) -> list[dict[str, str]]:
-        return [self._system_msg(msg) for msg in msgs]
-
-    def _default_system_msg(self):
-        return self._system_msg(self.system_prompt)
-
-    def ask(self, msg: str) -> str:
-        message = [self._default_system_msg(), self._user_msg(msg)]
-        rsp = self.completion(message)
-        return self.get_choice_text(rsp)
-
-    async def aask(self, msg: str, system_msgs: Optional[list[str]] = None, generator: bool = False) -> str:
-        if system_msgs:
-            message = self._system_msgs(system_msgs) + [self._user_msg(msg)]
-        else:
-            message = [self._default_system_msg(), self._user_msg(msg)]
-        try:
-            rsp = await self.acompletion_text(message, stream=True, generator=generator)
-        except Exception as e:
-            logger.exception(f"{e}")
-            logger.info(f"ask:{msg}, error:{e}")
-            raise e
-        logger.info(f"ask:{msg}, answer:{rsp}")
-        return rsp
-
-    def _extract_assistant_rsp(self, context):
-        return "\n".join([i["content"] for i in context if i["role"] == "assistant"])
-
-    def ask_batch(self, msgs: list) -> str:
-        context = []
-        for msg in msgs:
-            umsg = self._user_msg(msg)
-            context.append(umsg)
-            rsp = self.completion(context)
-            rsp_text = self.get_choice_text(rsp)
-            context.append(self._assistant_msg(rsp_text))
-        return self._extract_assistant_rsp(context)
-
-    async def aask_batch(self, msgs: list) -> str:
-        """Sequential questioning"""
-        context = []
-        for msg in msgs:
-            umsg = self._user_msg(msg)
-            context.append(umsg)
-            rsp_text = await self.acompletion_text(context)
-            context.append(self._assistant_msg(rsp_text))
-        return self._extract_assistant_rsp(context)
-
-    def ask_code(self, msgs: list[str]) -> str:
-        """FIXME: No code segment filtering has been done here, and all results are actually displayed"""
-        rsp_text = self.ask_batch(msgs)
-        return rsp_text
-
-    async def aask_code(self, msgs: list[str]) -> str:
-        """FIXME: No code segment filtering has been done here, and all results are actually displayed"""
-        rsp_text = await self.aask_batch(msgs)
-        return rsp_text
-
-    @abstractmethod
-    def completion(self, messages: list[dict]):
-        """All GPTAPIs are required to provide the standard OpenAI completion interface
-        [
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": "hello, show me python hello world code"},
-            # {"role": "assistant", "content": ...}, # If there is an answer in the history, also include it
-        ]
-        """
-
-    @abstractmethod
-    async def acompletion(self, messages: list[dict]):
-        """Asynchronous version of completion
-        All GPTAPIs are required to provide the standard OpenAI completion interface
-        [
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": "hello, show me python hello world code"},
-            # {"role": "assistant", "content": ...}, # If there is an answer in the history, also include it
-        ]
-        """
-
-    @abstractmethod
-    async def acompletion_text(self, messages: list[dict], stream=False) -> str:
-        """Asynchronous version of completion. Return str.
Support stream-print""" - - def get_choice_text(self, rsp: dict) -> str: - """Required to provide the first text of choice""" - return rsp.get("choices")[0]["message"]["content"] - - def messages_to_prompt(self, messages: list[dict]): - """[{"role": "user", "content": msg}] to user: etc.""" - return "\n".join([f"{i['role']}: {i['content']}" for i in messages]) - - def messages_to_dict(self, messages): - """objects to [{"role": "user", "content": msg}] etc.""" - return [i.to_dict() for i in messages] diff --git a/spaces/diacanFperku/AutoGPT/Activation TruLaser 2015 Activation.md b/spaces/diacanFperku/AutoGPT/Activation TruLaser 2015 Activation.md deleted file mode 100644 index 1e7fd8e9e46d05dff707eb7bdd464f0000fcef37..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Activation TruLaser 2015 Activation.md +++ /dev/null @@ -1,6 +0,0 @@ -

activation TruLaser 2015 activation


DOWNLOAD ►►►►► https://gohhs.com/2uFUDF



- -Siemens CNC Controls, front loading pallet tables, 2500W Trucoax laser, 5ft x 10ft. Click to request a quote. 2015 TRUMPF TruLaser 3030 Fiber. This fiber based laser engraver is an excellent solution for laser cutting, engraving and marking. It is equipped with a 3D scanning module, as well as a range of laser cutting and engraving control options, including temperature and pressure control inside the laser tube to maintain maximum efficiency. It also has two Ethernet interfaces that allow for remote configuration and maintenance. Click to request a quote. 8a78ff9644
-
-
-

diff --git a/spaces/diacanFperku/AutoGPT/Khulasa Quran Urdu Pdf Free.md b/spaces/diacanFperku/AutoGPT/Khulasa Quran Urdu Pdf Free.md deleted file mode 100644 index ae569512ee3411820f582d576ea9c95d7574d7f1..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Khulasa Quran Urdu Pdf Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

Khulasa Quran Urdu Pdf Free


DOWNLOADhttps://gohhs.com/2uFU6X



-
-Dec 29, 2018 - IslamiBooks Quran & Madni Ittar House Waqas Plaza Ground Floor ... الإبداع فى النحو , اسهل طرق تعلم النحو مع الخرائط الذهنية , pdf Free Books, ... Free amliyat books: 32 amliyat books in urdu free Free Books Online, Free Pdf. 4d29de3e1b
-
-
-

diff --git a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/modules.py b/spaces/digitalxingtong/Jiuxia-Bert-Vits2/modules.py
deleted file mode 100644
index 92e0f32a51c472bfd1659a50a95a95d195281d2b..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/modules.py
+++ /dev/null
@@ -1,452 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-from attentions import Encoder
-
-LRELU_SLOPE = 0.1
-
-class LayerNorm(nn.Module):
-    def __init__(self, channels, eps=1e-5):
-        super().__init__()
-        self.channels = channels
-        self.eps = eps
-
-        self.gamma = nn.Parameter(torch.ones(channels))
-        self.beta = nn.Parameter(torch.zeros(channels))
-
-    def forward(self, x):
-        x = x.transpose(1, -1)
-        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-        return x.transpose(1, -1)
-
-class ConvReluNorm(nn.Module):
-    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-        super().__init__()
-        self.in_channels = in_channels
-        self.hidden_channels = hidden_channels
-        self.out_channels = out_channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.p_dropout = p_dropout
-        assert n_layers > 1, "Number of layers should be greater than 1."
-
-        self.conv_layers = nn.ModuleList()
-        self.norm_layers = nn.ModuleList()
-        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-        self.norm_layers.append(LayerNorm(hidden_channels))
-        self.relu_drop = nn.Sequential(
-            nn.ReLU(),
-            nn.Dropout(p_dropout))
-        for _ in range(n_layers-1):
-            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-            self.norm_layers.append(LayerNorm(hidden_channels))
-        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-        self.proj.weight.data.zero_()
-        self.proj.bias.data.zero_()
-
-    def forward(self, x, x_mask):
-        x_org = x
-        for i in range(self.n_layers):
-            x = self.conv_layers[i](x * x_mask)
-            x = self.norm_layers[i](x)
-            x = self.relu_drop(x)
-        x = x_org + self.proj(x)
-        return x * x_mask
-
-
-class DDSConv(nn.Module):
-    """
-    Dilated and Depth-Separable Convolution
-    """
-    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-        super().__init__()
-        self.channels = channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.p_dropout = p_dropout
-
-        self.drop = nn.Dropout(p_dropout)
-        self.convs_sep = nn.ModuleList()
-        self.convs_1x1 = nn.ModuleList()
-        self.norms_1 = nn.ModuleList()
-        self.norms_2 = nn.ModuleList()
-        for i in range(n_layers):
-            dilation = kernel_size ** i
-            padding = (kernel_size * dilation - dilation) // 2
-            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-                groups=channels, dilation=dilation, padding=padding
-            ))
-            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-            self.norms_1.append(LayerNorm(channels))
-            self.norms_2.append(LayerNorm(channels))
-
-    def forward(self, x, x_mask, g=None):
-        if g is not None:
-            x = x + g
-        for i in range(self.n_layers):
-            y = self.convs_sep[i](x * x_mask)
-            y = self.norms_1[i](y)
-            y = F.gelu(y)
-            y = self.convs_1x1[i](y)
-            y = self.norms_2[i](y)
-            y = F.gelu(y)
-            y = self.drop(y)
-            x = x + y
-        return x * x_mask
x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): 
- xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class 
ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x - - -class TransformerCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - n_layers, - n_heads, - p_dropout=0, - filter_channels=0, - mean_only=False, - wn_sharing_parameter=None, - gin_channels = 0 - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = gin_channels) if wn_sharing_parameter is None else wn_sharing_parameter - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x diff --git a/spaces/digitalxingtong/Nailv-Bert-Vits2/losses.py b/spaces/digitalxingtong/Nailv-Bert-Vits2/losses.py deleted file mode 100644 index 
fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nailv-Bert-Vits2/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/dineshreddy/WALT/mmdet/core/bbox/samplers/ohem_sampler.py b/spaces/dineshreddy/WALT/mmdet/core/bbox/samplers/ohem_sampler.py deleted file mode 100644 index 8b99f60ef0176f1b7a56665fb0f59272f65b84cd..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/core/bbox/samplers/ohem_sampler.py +++ /dev/null @@ -1,107 +0,0 @@ -import torch - -from ..builder import BBOX_SAMPLERS - -from ..transforms import bbox2roi -from .base_sampler import BaseSampler - - -@BBOX_SAMPLERS.register_module() -class OHEMSampler(BaseSampler): - r"""Online Hard Example Mining Sampler described in `Training Region-based - Object Detectors with Online Hard Example Mining - <https://arxiv.org/abs/1604.03540>`_. - """ - - def __init__(self, - num, - pos_fraction, - context, - neg_pos_ub=-1, - add_gt_as_proposals=True, - **kwargs): - super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, - add_gt_as_proposals) - self.context = context - if not hasattr(self.context, 'num_stages'): - self.bbox_head = self.context.bbox_head - else: - self.bbox_head = self.context.bbox_head[self.context.current_stage] - - def hard_mining(self, inds, num_expected, bboxes, labels, feats): - with torch.no_grad(): - rois = bbox2roi([bboxes]) - if not hasattr(self.context, 'num_stages'): - bbox_results = self.context._bbox_forward(feats, rois) - else: - bbox_results = self.context._bbox_forward( - self.context.current_stage, feats, rois) - cls_score = bbox_results['cls_score'] - loss = self.bbox_head.loss( - cls_score=cls_score, - bbox_pred=None, - rois=rois, - labels=labels, - label_weights=cls_score.new_ones(cls_score.size(0)), - bbox_targets=None, - bbox_weights=None, - reduction_override='none')['loss_cls'] - _, topk_loss_inds = loss.topk(num_expected) - return inds[topk_loss_inds] - - def _sample_pos(self, - assign_result, - num_expected, - bboxes=None, - feats=None, - **kwargs): - """Sample positive boxes. - - Args: - assign_result (:obj:`AssignResult`): Assigned results - num_expected (int): Number of expected positive samples - bboxes (torch.Tensor, optional): Boxes. Defaults to None. 
- feats (list[torch.Tensor], optional): Multi-level features. - Defaults to None. - - Returns: - torch.Tensor: Indices of positive samples - """ - # Sample some hard positive samples - pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) - if pos_inds.numel() != 0: - pos_inds = pos_inds.squeeze(1) - if pos_inds.numel() <= num_expected: - return pos_inds - else: - return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], - assign_result.labels[pos_inds], feats) - - def _sample_neg(self, - assign_result, - num_expected, - bboxes=None, - feats=None, - **kwargs): - """Sample negative boxes. - - Args: - assign_result (:obj:`AssignResult`): Assigned results - num_expected (int): Number of expected negative samples - bboxes (torch.Tensor, optional): Boxes. Defaults to None. - feats (list[torch.Tensor], optional): Multi-level features. - Defaults to None. - - Returns: - torch.Tensor: Indices of negative samples - """ - # Sample some hard negative samples - neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) - if neg_inds.numel() != 0: - neg_inds = neg_inds.squeeze(1) - if len(neg_inds) <= num_expected: - return neg_inds - else: - neg_labels = assign_result.labels.new_empty( - neg_inds.size(0)).fill_(self.bbox_head.num_classes) - return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], - neg_labels, feats) diff --git a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/shared_heads/__init__.py b/spaces/dineshreddy/WALT/mmdet/models/roi_heads/shared_heads/__init__.py deleted file mode 100644 index bbe70145b8bf7c304370f725f5afa8db98666679..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/shared_heads/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .res_layer import ResLayer - -__all__ = ['ResLayer'] diff --git a/spaces/dlmn/BHASHAVANI/README.md b/spaces/dlmn/BHASHAVANI/README.md deleted file mode 100644 index 4090135b96ab72f898bacd1da6c39ee3b6753cc2..0000000000000000000000000000000000000000 --- a/spaces/dlmn/BHASHAVANI/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: BHASHAVANI -emoji: ⚡ -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dnth/gpt-neo-paraphrase/README.md b/spaces/dnth/gpt-neo-paraphrase/README.md deleted file mode 100644 index 9206d5f84a2c2ef74ef4f19c3016594184c3f785..0000000000000000000000000000000000000000 --- a/spaces/dnth/gpt-neo-paraphrase/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Gpt Neo Paraphrase -emoji: 👁 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/doevent/df/app.py b/spaces/doevent/df/app.py deleted file mode 100644 index 0583eea1090bae489d37e7308eaa7585d0df94db..0000000000000000000000000000000000000000 --- a/spaces/doevent/df/app.py +++ /dev/null @@ -1,69 +0,0 @@ -import gradio as gr -import openai -import os -import time -import requests -import re - -name = "adventblocks" -openai.api_key = os.environ['token_adventblocks'] -openai.api_base = "https://api.daku.tech/v1" -os.makedirs("output", exist_ok=True) - -stop_words_mini = ('pussy', 'fucked', 'fuck', ' sex ', 'naked', 'nude', ' cock ', ' dick ', 'penis', 'porn ', ' vibrator', - 'vagina', 'vulva', ' anus ', 'hentai', 'masturbation', 'topless', "porno", " ass ", "голый", 
"голая", "голое", - "обнажен", "секс", "порно", " анус", " tits", "nipples", "nsfw", "loli", "nazism", "without clothes", "pornstar", " shit") - -def greet(prompt_text: str, timer: int, count: int, model: str): - start_load = time.time() - - prompt_text = prompt_text.replace("\n", " ") - # stop list - for filter_word in stop_words_mini: - if re.search(rf"{filter_word}", prompt_text, flags=re.IGNORECASE): - print(f"FILTER WORD: {filter_word}\n{prompt_text}") - return [] - - dir = "./output" - image_list = [os.path.abspath(os.path.join(dir,i)) for i in os.listdir(dir)] - for rm in image_list: - os.remove(rm) - - try: - print(f"Send to {model}... --> {prompt_text}") - tr = openai.Image.create(prompt=prompt_text, model=model, n=count, size="1024x1024") - - finish_load = int(time.time() - start_load) - print(f"Time: {finish_load} sec") - if finish_load < timer: - print(f"Wait: {timer-finish_load} sec") - time.sleep(timer-finish_load) - except (openai.error.InvalidRequestError, openai.error.APIError) as e: - if str(e).startswith("Forbidden"): - print(f"Forbidden Error {name}: {e}\n{prompt_text}") - return [] - print(f"Error {name}: {e}\n{prompt_text}") - time.sleep(3) - return [] - - for counter, image in enumerate(tr['data']): - response = requests.get(image['url']) - image_data = response.content - filename = f"{dir}/{counter}.png" - with open(filename, "wb") as file: - file.write(image_data) - - image_list = [os.path.abspath(os.path.join(dir,i)) for i in os.listdir(dir)] - - return image_list - - -iface = gr.Interface(fn=greet, - inputs=[gr.Textbox(type="text", value="a cat", label="Prompt"), - gr.Slider(0, 800, step=1, value=5, label="Timer"), - gr.Slider(1, 10, step=1, value=2, label="Counter image"), - gr.Textbox(type="text", value="deepfloyd-if", label="Model")], - outputs=gr.Gallery(), allow_flagging="never") - -iface.queue(concurrency_count=1, api_open=False).launch(show_api=False, show_error=True) - diff --git a/spaces/dorkai/singpt/convert-to-safetensors.py b/spaces/dorkai/singpt/convert-to-safetensors.py deleted file mode 100644 index 63baaa9726ab48025d2ba473d029bb3f1153aa3a..0000000000000000000000000000000000000000 --- a/spaces/dorkai/singpt/convert-to-safetensors.py +++ /dev/null @@ -1,38 +0,0 @@ -''' - -Converts a transformers model to safetensors format and shards it. - -This makes it faster to load (because of safetensors) and lowers its RAM usage -while loading (because of sharding). - -Based on the original script by 81300: - -https://gist.github.com/81300/fe5b08bff1cba45296a829b9d6b0f303 - -''' - -import argparse -from pathlib import Path - -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer - -parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=54)) -parser.add_argument('MODEL', type=str, default=None, nargs='?', help="Path to the input model.") -parser.add_argument('--output', type=str, default=None, help='Path to the output folder (default: models/{model_name}_safetensors).') -parser.add_argument("--max-shard-size", type=str, default="2GB", help="Maximum size of a shard in GB or MB (default: %(default)s).") -parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. 
Requires NVIDIA Ampere GPU.') -args = parser.parse_args() - -if __name__ == '__main__': - path = Path(args.MODEL) - model_name = path.name - - print(f"Loading {model_name}...") - model = AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if args.bf16 else torch.float16) - tokenizer = AutoTokenizer.from_pretrained(path) - - out_folder = args.output or Path(f"models/{model_name}_safetensors") - print(f"Saving the converted model to {out_folder} with a maximum shard size of {args.max_shard_size}...") - model.save_pretrained(out_folder, max_shard_size=args.max_shard_size, safe_serialization=True) - tokenizer.save_pretrained(out_folder) diff --git a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/extensions/google_translate/script.py b/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/extensions/google_translate/script.py deleted file mode 100644 index 63226107b2c2afe086fc343c7b7f7df78bef3f8a..0000000000000000000000000000000000000000 --- a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/extensions/google_translate/script.py +++ /dev/null @@ -1,46 +0,0 @@ -import gradio as gr -from deep_translator import GoogleTranslator - -params = { - "language string": "ja", -} - -language_codes = {'Afrikaans': 'af', 'Albanian': 'sq', 'Amharic': 'am', 'Arabic': 'ar', 'Armenian': 'hy', 'Azerbaijani': 'az', 'Basque': 'eu', 'Belarusian': 'be', 'Bengali': 'bn', 'Bosnian': 'bs', 'Bulgarian': 'bg', 'Catalan': 'ca', 'Cebuano': 'ceb', 'Chinese (Simplified)': 'zh-CN', 'Chinese (Traditional)': 'zh-TW', 'Corsican': 'co', 'Croatian': 'hr', 'Czech': 'cs', 'Danish': 'da', 'Dutch': 'nl', 'English': 'en', 'Esperanto': 'eo', 'Estonian': 'et', 'Finnish': 'fi', 'French': 'fr', 'Frisian': 'fy', 'Galician': 'gl', 'Georgian': 'ka', 'German': 'de', 'Greek': 'el', 'Gujarati': 'gu', 'Haitian Creole': 'ht', 'Hausa': 'ha', 'Hawaiian': 'haw', 'Hebrew': 'iw', 'Hindi': 'hi', 'Hmong': 'hmn', 'Hungarian': 'hu', 'Icelandic': 'is', 'Igbo': 'ig', 'Indonesian': 'id', 'Irish': 'ga', 'Italian': 'it', 'Japanese': 'ja', 'Javanese': 'jw', 'Kannada': 'kn', 'Kazakh': 'kk', 'Khmer': 'km', 'Korean': 'ko', 'Kurdish': 'ku', 'Kyrgyz': 'ky', 'Lao': 'lo', 'Latin': 'la', 'Latvian': 'lv', 'Lithuanian': 'lt', 'Luxembourgish': 'lb', 'Macedonian': 'mk', 'Malagasy': 'mg', 'Malay': 'ms', 'Malayalam': 'ml', 'Maltese': 'mt', 'Maori': 'mi', 'Marathi': 'mr', 'Mongolian': 'mn', 'Myanmar (Burmese)': 'my', 'Nepali': 'ne', 'Norwegian': 'no', 'Nyanja (Chichewa)': 'ny', 'Pashto': 'ps', 'Persian': 'fa', 'Polish': 'pl', 'Portuguese (Portugal, Brazil)': 'pt', 'Punjabi': 'pa', 'Romanian': 'ro', 'Russian': 'ru', 'Samoan': 'sm', 'Scots Gaelic': 'gd', 'Serbian': 'sr', 'Sesotho': 'st', 'Shona': 'sn', 'Sindhi': 'sd', 'Sinhala (Sinhalese)': 'si', 'Slovak': 'sk', 'Slovenian': 'sl', 'Somali': 'so', 'Spanish': 'es', 'Sundanese': 'su', 'Swahili': 'sw', 'Swedish': 'sv', 'Tagalog (Filipino)': 'tl', 'Tajik': 'tg', 'Tamil': 'ta', 'Telugu': 'te', 'Thai': 'th', 'Turkish': 'tr', 'Ukrainian': 'uk', 'Urdu': 'ur', 'Uzbek': 'uz', 'Vietnamese': 'vi', 'Welsh': 'cy', 'Xhosa': 'xh', 'Yiddish': 'yi', 'Yoruba': 'yo', 'Zulu': 'zu'} - - -def input_modifier(string): - """ - This function is applied to your text inputs before - they are fed into the model. - """ - - return GoogleTranslator(source=params['language string'], target='en').translate(string) - - -def output_modifier(string): - """ - This function is applied to the model outputs. 
- """ - - return GoogleTranslator(source='en', target=params['language string']).translate(string) - - -def bot_prefix_modifier(string): - """ - This function is only applied in chat mode. It modifies - the prefix text for the Bot and can be used to bias its - behavior. - """ - - return string - - -def ui(): - # Finding the language name from the language code to use as the default value - language_name = list(language_codes.keys())[list(language_codes.values()).index(params['language string'])] - - # Gradio elements - language = gr.Dropdown(value=language_name, choices=[k for k in language_codes], label='Language') - - # Event functions to update the parameters in the backend - language.change(lambda x: params.update({"language string": language_codes[x]}), language, None) diff --git a/spaces/duycse1603/math2tex/HybridViT/module/component/common/gated_sum.py b/spaces/duycse1603/math2tex/HybridViT/module/component/common/gated_sum.py deleted file mode 100644 index 387ef11bbe1f1d90a91a5a44fc4c517235d6add7..0000000000000000000000000000000000000000 --- a/spaces/duycse1603/math2tex/HybridViT/module/component/common/gated_sum.py +++ /dev/null @@ -1,36 +0,0 @@ -import torch - - -class GatedSum(torch.nn.Module): - """ - This `Module` represents a gated sum of two tensors `a` and `b`. Specifically: - ``` - f = activation(W [a; b]) - out = f * a + (1 - f) * b - ``` - # Parameters - input_dim : `int`, required - The dimensionality of the input. We assume the input have shape `(..., input_dim)`. - activation : `Activation`, optional (default = `torch.nn.Sigmoid()`) - The activation function to use. - """ - - def __init__(self, input_dim: int, activation = torch.nn.Sigmoid()) -> None: - super().__init__() - self.input_dim = input_dim - self._gate = torch.nn.Linear(input_dim * 2, 1) - self._activation = activation - - def get_input_dim(self): - return self.input_dim - - def get_output_dim(self): - return self.input_dim - - def forward(self, input_a: torch.Tensor, input_b: torch.Tensor) -> torch.Tensor: - if input_a.size() != input_b.size(): - raise ValueError("The input must have the same size.") - if input_a.size(-1) != self.input_dim: - raise ValueError("Input size must match `input_dim`.") - gate_value = self._activation(self._gate(torch.cat([input_a, input_b], -1))) - return gate_value * input_a + (1 - gate_value) * input_b \ No newline at end of file diff --git a/spaces/espejelomar/Identify-the-breed-of-your-pet/__init__.py b/spaces/espejelomar/Identify-the-breed-of-your-pet/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/etri-vilab/Ko-LLaVA/static/css/bulma.min.css b/spaces/etri-vilab/Ko-LLaVA/static/css/bulma.min.css deleted file mode 100644 index a807a314caa0df0ddccacd6b732d83f5ffde7a89..0000000000000000000000000000000000000000 --- a/spaces/etri-vilab/Ko-LLaVA/static/css/bulma.min.css +++ /dev/null @@ -1 +0,0 @@ -/*! 
bulma.io v0.9.1 | MIT License | github.com/jgthms/bulma */@-webkit-keyframes spinAround{from{transform:rotate(0)}to{transform:rotate(359deg)}}@keyframes spinAround{from{transform:rotate(0)}to{transform:rotate(359deg)}}.breadcrumb,.button,.delete,.file,.is-unselectable,.modal-close,.pagination-ellipsis,.pagination-link,.pagination-next,.pagination-previous,.tabs{-webkit-touch-callout:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.navbar-link:not(.is-arrowless)::after,.select:not(.is-multiple):not(.is-loading)::after{border:3px solid transparent;border-radius:2px;border-right:0;border-top:0;content:" ";display:block;height:.625em;margin-top:-.4375em;pointer-events:none;position:absolute;top:50%;transform:rotate(-45deg);transform-origin:center;width:.625em}.block:not(:last-child),.box:not(:last-child),.breadcrumb:not(:last-child),.content:not(:last-child),.highlight:not(:last-child),.level:not(:last-child),.message:not(:last-child),.notification:not(:last-child),.pagination:not(:last-child),.progress:not(:last-child),.subtitle:not(:last-child),.table-container:not(:last-child),.table:not(:last-child),.tabs:not(:last-child),.title:not(:last-child){margin-bottom:1.5rem}.delete,.modal-close{-moz-appearance:none;-webkit-appearance:none;background-color:rgba(10,10,10,.2);border:none;border-radius:290486px;cursor:pointer;pointer-events:auto;display:inline-block;flex-grow:0;flex-shrink:0;font-size:0;height:20px;max-height:20px;max-width:20px;min-height:20px;min-width:20px;outline:0;position:relative;vertical-align:top;width:20px}.delete::after,.delete::before,.modal-close::after,.modal-close::before{background-color:#fff;content:"";display:block;left:50%;position:absolute;top:50%;transform:translateX(-50%) translateY(-50%) rotate(45deg);transform-origin:center center}.delete::before,.modal-close::before{height:2px;width:50%}.delete::after,.modal-close::after{height:50%;width:2px}.delete:focus,.delete:hover,.modal-close:focus,.modal-close:hover{background-color:rgba(10,10,10,.3)}.delete:active,.modal-close:active{background-color:rgba(10,10,10,.4)}.is-small.delete,.is-small.modal-close{height:16px;max-height:16px;max-width:16px;min-height:16px;min-width:16px;width:16px}.is-medium.delete,.is-medium.modal-close{height:24px;max-height:24px;max-width:24px;min-height:24px;min-width:24px;width:24px}.is-large.delete,.is-large.modal-close{height:32px;max-height:32px;max-width:32px;min-height:32px;min-width:32px;width:32px}.button.is-loading::after,.control.is-loading::after,.loader,.select.is-loading::after{-webkit-animation:spinAround .5s infinite linear;animation:spinAround .5s infinite linear;border:2px solid #dbdbdb;border-radius:290486px;border-right-color:transparent;border-top-color:transparent;content:"";display:block;height:1em;position:relative;width:1em}.hero-video,.image.is-16by9 .has-ratio,.image.is-16by9 img,.image.is-1by1 .has-ratio,.image.is-1by1 img,.image.is-1by2 .has-ratio,.image.is-1by2 img,.image.is-1by3 .has-ratio,.image.is-1by3 img,.image.is-2by1 .has-ratio,.image.is-2by1 img,.image.is-2by3 .has-ratio,.image.is-2by3 img,.image.is-3by1 .has-ratio,.image.is-3by1 img,.image.is-3by2 .has-ratio,.image.is-3by2 img,.image.is-3by4 .has-ratio,.image.is-3by4 img,.image.is-3by5 .has-ratio,.image.is-3by5 img,.image.is-4by3 .has-ratio,.image.is-4by3 img,.image.is-4by5 .has-ratio,.image.is-4by5 img,.image.is-5by3 .has-ratio,.image.is-5by3 img,.image.is-5by4 .has-ratio,.image.is-5by4 img,.image.is-9by16 .has-ratio,.image.is-9by16 img,.image.is-square 
.has-ratio,.image.is-square img,.is-overlay,.modal,.modal-background{bottom:0;left:0;position:absolute;right:0;top:0}.button,.file-cta,.file-name,.input,.pagination-ellipsis,.pagination-link,.pagination-next,.pagination-previous,.select select,.textarea{-moz-appearance:none;-webkit-appearance:none;align-items:center;border:1px solid transparent;border-radius:4px;box-shadow:none;display:inline-flex;font-size:1rem;height:2.5em;justify-content:flex-start;line-height:1.5;padding-bottom:calc(.5em - 1px);padding-left:calc(.75em - 1px);padding-right:calc(.75em - 1px);padding-top:calc(.5em - 1px);position:relative;vertical-align:top}.button:active,.button:focus,.file-cta:active,.file-cta:focus,.file-name:active,.file-name:focus,.input:active,.input:focus,.is-active.button,.is-active.file-cta,.is-active.file-name,.is-active.input,.is-active.pagination-ellipsis,.is-active.pagination-link,.is-active.pagination-next,.is-active.pagination-previous,.is-active.textarea,.is-focused.button,.is-focused.file-cta,.is-focused.file-name,.is-focused.input,.is-focused.pagination-ellipsis,.is-focused.pagination-link,.is-focused.pagination-next,.is-focused.pagination-previous,.is-focused.textarea,.pagination-ellipsis:active,.pagination-ellipsis:focus,.pagination-link:active,.pagination-link:focus,.pagination-next:active,.pagination-next:focus,.pagination-previous:active,.pagination-previous:focus,.select select.is-active,.select select.is-focused,.select select:active,.select select:focus,.textarea:active,.textarea:focus{outline:0}.button[disabled],.file-cta[disabled],.file-name[disabled],.input[disabled],.pagination-ellipsis[disabled],.pagination-link[disabled],.pagination-next[disabled],.pagination-previous[disabled],.select fieldset[disabled] select,.select select[disabled],.textarea[disabled],fieldset[disabled] .button,fieldset[disabled] .file-cta,fieldset[disabled] .file-name,fieldset[disabled] .input,fieldset[disabled] .pagination-ellipsis,fieldset[disabled] .pagination-link,fieldset[disabled] .pagination-next,fieldset[disabled] .pagination-previous,fieldset[disabled] .select select,fieldset[disabled] .textarea{cursor:not-allowed}/*! 
minireset.css v0.0.6 | MIT License | github.com/jgthms/minireset.css */blockquote,body,dd,dl,dt,fieldset,figure,h1,h2,h3,h4,h5,h6,hr,html,iframe,legend,li,ol,p,pre,textarea,ul{margin:0;padding:0}h1,h2,h3,h4,h5,h6{font-size:100%;font-weight:400}ul{list-style:none}button,input,select,textarea{margin:0}html{box-sizing:border-box}*,::after,::before{box-sizing:inherit}img,video{height:auto;max-width:100%}iframe{border:0}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}td:not([align]),th:not([align]){text-align:inherit}html{background-color:#fff;font-size:16px;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;min-width:300px;overflow-x:hidden;overflow-y:scroll;text-rendering:optimizeLegibility;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%;-ms-text-size-adjust:100%;text-size-adjust:100%}article,aside,figure,footer,header,hgroup,section{display:block}body,button,input,optgroup,select,textarea{font-family:BlinkMacSystemFont,-apple-system,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,"Fira Sans","Droid Sans","Helvetica Neue",Helvetica,Arial,sans-serif}code,pre{-moz-osx-font-smoothing:auto;-webkit-font-smoothing:auto;font-family:monospace}body{color:#4a4a4a;font-size:1em;font-weight:400;line-height:1.5}a{color:#3273dc;cursor:pointer;text-decoration:none}a strong{color:currentColor}a:hover{color:#363636}code{background-color:#f5f5f5;color:#da1039;font-size:.875em;font-weight:400;padding:.25em .5em .25em}hr{background-color:#f5f5f5;border:none;display:block;height:2px;margin:1.5rem 0}img{height:auto;max-width:100%}input[type=checkbox],input[type=radio]{vertical-align:baseline}small{font-size:.875em}span{font-style:inherit;font-weight:inherit}strong{color:#363636;font-weight:700}fieldset{border:none}pre{-webkit-overflow-scrolling:touch;background-color:#f5f5f5;color:#4a4a4a;font-size:.875em;overflow-x:auto;padding:1.25rem 1.5rem;white-space:pre;word-wrap:normal}pre code{background-color:transparent;color:currentColor;font-size:1em;padding:0}table td,table th{vertical-align:top}table td:not([align]),table th:not([align]){text-align:inherit}table th{color:#363636}.box{background-color:#fff;border-radius:6px;box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 0 0 1px rgba(10,10,10,.02);color:#4a4a4a;display:block;padding:1.25rem}a.box:focus,a.box:hover{box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 0 0 1px #3273dc}a.box:active{box-shadow:inset 0 1px 2px rgba(10,10,10,.2),0 0 0 1px #3273dc}.button{background-color:#fff;border-color:#dbdbdb;border-width:1px;color:#363636;cursor:pointer;justify-content:center;padding-bottom:calc(.5em - 1px);padding-left:1em;padding-right:1em;padding-top:calc(.5em - 1px);text-align:center;white-space:nowrap}.button strong{color:inherit}.button .icon,.button .icon.is-large,.button .icon.is-medium,.button .icon.is-small{height:1.5em;width:1.5em}.button .icon:first-child:not(:last-child){margin-left:calc(-.5em - 1px);margin-right:.25em}.button .icon:last-child:not(:first-child){margin-left:.25em;margin-right:calc(-.5em - 1px)}.button .icon:first-child:last-child{margin-left:calc(-.5em - 1px);margin-right:calc(-.5em - 1px)}.button.is-hovered,.button:hover{border-color:#b5b5b5;color:#363636}.button.is-focused,.button:focus{border-color:#3273dc;color:#363636}.button.is-focused:not(:active),.button:focus:not(:active){box-shadow:0 0 0 .125em 
rgba(50,115,220,.25)}.button.is-active,.button:active{border-color:#4a4a4a;color:#363636}.button.is-text{background-color:transparent;border-color:transparent;color:#4a4a4a;text-decoration:underline}.button.is-text.is-focused,.button.is-text.is-hovered,.button.is-text:focus,.button.is-text:hover{background-color:#f5f5f5;color:#363636}.button.is-text.is-active,.button.is-text:active{background-color:#e8e8e8;color:#363636}.button.is-text[disabled],fieldset[disabled] .button.is-text{background-color:transparent;border-color:transparent;box-shadow:none}.button.is-white{background-color:#fff;border-color:transparent;color:#0a0a0a}.button.is-white.is-hovered,.button.is-white:hover{background-color:#f9f9f9;border-color:transparent;color:#0a0a0a}.button.is-white.is-focused,.button.is-white:focus{border-color:transparent;color:#0a0a0a}.button.is-white.is-focused:not(:active),.button.is-white:focus:not(:active){box-shadow:0 0 0 .125em rgba(255,255,255,.25)}.button.is-white.is-active,.button.is-white:active{background-color:#f2f2f2;border-color:transparent;color:#0a0a0a}.button.is-white[disabled],fieldset[disabled] .button.is-white{background-color:#fff;border-color:transparent;box-shadow:none}.button.is-white.is-inverted{background-color:#0a0a0a;color:#fff}.button.is-white.is-inverted.is-hovered,.button.is-white.is-inverted:hover{background-color:#000}.button.is-white.is-inverted[disabled],fieldset[disabled] .button.is-white.is-inverted{background-color:#0a0a0a;border-color:transparent;box-shadow:none;color:#fff}.button.is-white.is-loading::after{border-color:transparent transparent #0a0a0a #0a0a0a!important}.button.is-white.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-white.is-outlined.is-focused,.button.is-white.is-outlined.is-hovered,.button.is-white.is-outlined:focus,.button.is-white.is-outlined:hover{background-color:#fff;border-color:#fff;color:#0a0a0a}.button.is-white.is-outlined.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-white.is-outlined.is-loading.is-focused::after,.button.is-white.is-outlined.is-loading.is-hovered::after,.button.is-white.is-outlined.is-loading:focus::after,.button.is-white.is-outlined.is-loading:hover::after{border-color:transparent transparent #0a0a0a #0a0a0a!important}.button.is-white.is-outlined[disabled],fieldset[disabled] .button.is-white.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-white.is-inverted.is-outlined{background-color:transparent;border-color:#0a0a0a;color:#0a0a0a}.button.is-white.is-inverted.is-outlined.is-focused,.button.is-white.is-inverted.is-outlined.is-hovered,.button.is-white.is-inverted.is-outlined:focus,.button.is-white.is-inverted.is-outlined:hover{background-color:#0a0a0a;color:#fff}.button.is-white.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-white.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-white.is-inverted.is-outlined.is-loading:focus::after,.button.is-white.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-white.is-inverted.is-outlined[disabled],fieldset[disabled] 
.button.is-white.is-inverted.is-outlined{background-color:transparent;border-color:#0a0a0a;box-shadow:none;color:#0a0a0a}.button.is-black{background-color:#0a0a0a;border-color:transparent;color:#fff}.button.is-black.is-hovered,.button.is-black:hover{background-color:#040404;border-color:transparent;color:#fff}.button.is-black.is-focused,.button.is-black:focus{border-color:transparent;color:#fff}.button.is-black.is-focused:not(:active),.button.is-black:focus:not(:active){box-shadow:0 0 0 .125em rgba(10,10,10,.25)}.button.is-black.is-active,.button.is-black:active{background-color:#000;border-color:transparent;color:#fff}.button.is-black[disabled],fieldset[disabled] .button.is-black{background-color:#0a0a0a;border-color:transparent;box-shadow:none}.button.is-black.is-inverted{background-color:#fff;color:#0a0a0a}.button.is-black.is-inverted.is-hovered,.button.is-black.is-inverted:hover{background-color:#f2f2f2}.button.is-black.is-inverted[disabled],fieldset[disabled] .button.is-black.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#0a0a0a}.button.is-black.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-black.is-outlined{background-color:transparent;border-color:#0a0a0a;color:#0a0a0a}.button.is-black.is-outlined.is-focused,.button.is-black.is-outlined.is-hovered,.button.is-black.is-outlined:focus,.button.is-black.is-outlined:hover{background-color:#0a0a0a;border-color:#0a0a0a;color:#fff}.button.is-black.is-outlined.is-loading::after{border-color:transparent transparent #0a0a0a #0a0a0a!important}.button.is-black.is-outlined.is-loading.is-focused::after,.button.is-black.is-outlined.is-loading.is-hovered::after,.button.is-black.is-outlined.is-loading:focus::after,.button.is-black.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-black.is-outlined[disabled],fieldset[disabled] .button.is-black.is-outlined{background-color:transparent;border-color:#0a0a0a;box-shadow:none;color:#0a0a0a}.button.is-black.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-black.is-inverted.is-outlined.is-focused,.button.is-black.is-inverted.is-outlined.is-hovered,.button.is-black.is-inverted.is-outlined:focus,.button.is-black.is-inverted.is-outlined:hover{background-color:#fff;color:#0a0a0a}.button.is-black.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-black.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-black.is-inverted.is-outlined.is-loading:focus::after,.button.is-black.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #0a0a0a #0a0a0a!important}.button.is-black.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-black.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-light{background-color:#f5f5f5;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-light.is-hovered,.button.is-light:hover{background-color:#eee;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-light.is-focused,.button.is-light:focus{border-color:transparent;color:rgba(0,0,0,.7)}.button.is-light.is-focused:not(:active),.button.is-light:focus:not(:active){box-shadow:0 0 0 .125em rgba(245,245,245,.25)}.button.is-light.is-active,.button.is-light:active{background-color:#e8e8e8;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-light[disabled],fieldset[disabled] 
.button.is-light{background-color:#f5f5f5;border-color:transparent;box-shadow:none}.button.is-light.is-inverted{background-color:rgba(0,0,0,.7);color:#f5f5f5}.button.is-light.is-inverted.is-hovered,.button.is-light.is-inverted:hover{background-color:rgba(0,0,0,.7)}.button.is-light.is-inverted[disabled],fieldset[disabled] .button.is-light.is-inverted{background-color:rgba(0,0,0,.7);border-color:transparent;box-shadow:none;color:#f5f5f5}.button.is-light.is-loading::after{border-color:transparent transparent rgba(0,0,0,.7) rgba(0,0,0,.7)!important}.button.is-light.is-outlined{background-color:transparent;border-color:#f5f5f5;color:#f5f5f5}.button.is-light.is-outlined.is-focused,.button.is-light.is-outlined.is-hovered,.button.is-light.is-outlined:focus,.button.is-light.is-outlined:hover{background-color:#f5f5f5;border-color:#f5f5f5;color:rgba(0,0,0,.7)}.button.is-light.is-outlined.is-loading::after{border-color:transparent transparent #f5f5f5 #f5f5f5!important}.button.is-light.is-outlined.is-loading.is-focused::after,.button.is-light.is-outlined.is-loading.is-hovered::after,.button.is-light.is-outlined.is-loading:focus::after,.button.is-light.is-outlined.is-loading:hover::after{border-color:transparent transparent rgba(0,0,0,.7) rgba(0,0,0,.7)!important}.button.is-light.is-outlined[disabled],fieldset[disabled] .button.is-light.is-outlined{background-color:transparent;border-color:#f5f5f5;box-shadow:none;color:#f5f5f5}.button.is-light.is-inverted.is-outlined{background-color:transparent;border-color:rgba(0,0,0,.7);color:rgba(0,0,0,.7)}.button.is-light.is-inverted.is-outlined.is-focused,.button.is-light.is-inverted.is-outlined.is-hovered,.button.is-light.is-inverted.is-outlined:focus,.button.is-light.is-inverted.is-outlined:hover{background-color:rgba(0,0,0,.7);color:#f5f5f5}.button.is-light.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-light.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-light.is-inverted.is-outlined.is-loading:focus::after,.button.is-light.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #f5f5f5 #f5f5f5!important}.button.is-light.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-light.is-inverted.is-outlined{background-color:transparent;border-color:rgba(0,0,0,.7);box-shadow:none;color:rgba(0,0,0,.7)}.button.is-dark{background-color:#363636;border-color:transparent;color:#fff}.button.is-dark.is-hovered,.button.is-dark:hover{background-color:#2f2f2f;border-color:transparent;color:#fff}.button.is-dark.is-focused,.button.is-dark:focus{border-color:transparent;color:#fff}.button.is-dark.is-focused:not(:active),.button.is-dark:focus:not(:active){box-shadow:0 0 0 .125em rgba(54,54,54,.25)}.button.is-dark.is-active,.button.is-dark:active{background-color:#292929;border-color:transparent;color:#fff}.button.is-dark[disabled],fieldset[disabled] .button.is-dark{background-color:#363636;border-color:transparent;box-shadow:none}.button.is-dark.is-inverted{background-color:#fff;color:#363636}.button.is-dark.is-inverted.is-hovered,.button.is-dark.is-inverted:hover{background-color:#f2f2f2}.button.is-dark.is-inverted[disabled],fieldset[disabled] .button.is-dark.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#363636}.button.is-dark.is-loading::after{border-color:transparent transparent #fff 
#fff!important}.button.is-dark.is-outlined{background-color:transparent;border-color:#363636;color:#363636}.button.is-dark.is-outlined.is-focused,.button.is-dark.is-outlined.is-hovered,.button.is-dark.is-outlined:focus,.button.is-dark.is-outlined:hover{background-color:#363636;border-color:#363636;color:#fff}.button.is-dark.is-outlined.is-loading::after{border-color:transparent transparent #363636 #363636!important}.button.is-dark.is-outlined.is-loading.is-focused::after,.button.is-dark.is-outlined.is-loading.is-hovered::after,.button.is-dark.is-outlined.is-loading:focus::after,.button.is-dark.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-dark.is-outlined[disabled],fieldset[disabled] .button.is-dark.is-outlined{background-color:transparent;border-color:#363636;box-shadow:none;color:#363636}.button.is-dark.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-dark.is-inverted.is-outlined.is-focused,.button.is-dark.is-inverted.is-outlined.is-hovered,.button.is-dark.is-inverted.is-outlined:focus,.button.is-dark.is-inverted.is-outlined:hover{background-color:#fff;color:#363636}.button.is-dark.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-dark.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-dark.is-inverted.is-outlined.is-loading:focus::after,.button.is-dark.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #363636 #363636!important}.button.is-dark.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-dark.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-primary{background-color:#00d1b2;border-color:transparent;color:#fff}.button.is-primary.is-hovered,.button.is-primary:hover{background-color:#00c4a7;border-color:transparent;color:#fff}.button.is-primary.is-focused,.button.is-primary:focus{border-color:transparent;color:#fff}.button.is-primary.is-focused:not(:active),.button.is-primary:focus:not(:active){box-shadow:0 0 0 .125em rgba(0,209,178,.25)}.button.is-primary.is-active,.button.is-primary:active{background-color:#00b89c;border-color:transparent;color:#fff}.button.is-primary[disabled],fieldset[disabled] .button.is-primary{background-color:#00d1b2;border-color:transparent;box-shadow:none}.button.is-primary.is-inverted{background-color:#fff;color:#00d1b2}.button.is-primary.is-inverted.is-hovered,.button.is-primary.is-inverted:hover{background-color:#f2f2f2}.button.is-primary.is-inverted[disabled],fieldset[disabled] .button.is-primary.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#00d1b2}.button.is-primary.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-primary.is-outlined{background-color:transparent;border-color:#00d1b2;color:#00d1b2}.button.is-primary.is-outlined.is-focused,.button.is-primary.is-outlined.is-hovered,.button.is-primary.is-outlined:focus,.button.is-primary.is-outlined:hover{background-color:#00d1b2;border-color:#00d1b2;color:#fff}.button.is-primary.is-outlined.is-loading::after{border-color:transparent transparent #00d1b2 #00d1b2!important}.button.is-primary.is-outlined.is-loading.is-focused::after,.button.is-primary.is-outlined.is-loading.is-hovered::after,.button.is-primary.is-outlined.is-loading:focus::after,.button.is-primary.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff 
#fff!important}.button.is-primary.is-outlined[disabled],fieldset[disabled] .button.is-primary.is-outlined{background-color:transparent;border-color:#00d1b2;box-shadow:none;color:#00d1b2}.button.is-primary.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-primary.is-inverted.is-outlined.is-focused,.button.is-primary.is-inverted.is-outlined.is-hovered,.button.is-primary.is-inverted.is-outlined:focus,.button.is-primary.is-inverted.is-outlined:hover{background-color:#fff;color:#00d1b2}.button.is-primary.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-primary.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-primary.is-inverted.is-outlined.is-loading:focus::after,.button.is-primary.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #00d1b2 #00d1b2!important}.button.is-primary.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-primary.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-primary.is-light{background-color:#ebfffc;color:#00947e}.button.is-primary.is-light.is-hovered,.button.is-primary.is-light:hover{background-color:#defffa;border-color:transparent;color:#00947e}.button.is-primary.is-light.is-active,.button.is-primary.is-light:active{background-color:#d1fff8;border-color:transparent;color:#00947e}.button.is-link{background-color:#3273dc;border-color:transparent;color:#fff}.button.is-link.is-hovered,.button.is-link:hover{background-color:#276cda;border-color:transparent;color:#fff}.button.is-link.is-focused,.button.is-link:focus{border-color:transparent;color:#fff}.button.is-link.is-focused:not(:active),.button.is-link:focus:not(:active){box-shadow:0 0 0 .125em rgba(50,115,220,.25)}.button.is-link.is-active,.button.is-link:active{background-color:#2366d1;border-color:transparent;color:#fff}.button.is-link[disabled],fieldset[disabled] .button.is-link{background-color:#3273dc;border-color:transparent;box-shadow:none}.button.is-link.is-inverted{background-color:#fff;color:#3273dc}.button.is-link.is-inverted.is-hovered,.button.is-link.is-inverted:hover{background-color:#f2f2f2}.button.is-link.is-inverted[disabled],fieldset[disabled] .button.is-link.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#3273dc}.button.is-link.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-link.is-outlined{background-color:transparent;border-color:#3273dc;color:#3273dc}.button.is-link.is-outlined.is-focused,.button.is-link.is-outlined.is-hovered,.button.is-link.is-outlined:focus,.button.is-link.is-outlined:hover{background-color:#3273dc;border-color:#3273dc;color:#fff}.button.is-link.is-outlined.is-loading::after{border-color:transparent transparent #3273dc #3273dc!important}.button.is-link.is-outlined.is-loading.is-focused::after,.button.is-link.is-outlined.is-loading.is-hovered::after,.button.is-link.is-outlined.is-loading:focus::after,.button.is-link.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-link.is-outlined[disabled],fieldset[disabled] 
.button.is-link.is-outlined{background-color:transparent;border-color:#3273dc;box-shadow:none;color:#3273dc}.button.is-link.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-link.is-inverted.is-outlined.is-focused,.button.is-link.is-inverted.is-outlined.is-hovered,.button.is-link.is-inverted.is-outlined:focus,.button.is-link.is-inverted.is-outlined:hover{background-color:#fff;color:#3273dc}.button.is-link.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-link.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-link.is-inverted.is-outlined.is-loading:focus::after,.button.is-link.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #3273dc #3273dc!important}.button.is-link.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-link.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-link.is-light{background-color:#eef3fc;color:#2160c4}.button.is-link.is-light.is-hovered,.button.is-link.is-light:hover{background-color:#e3ecfa;border-color:transparent;color:#2160c4}.button.is-link.is-light.is-active,.button.is-link.is-light:active{background-color:#d8e4f8;border-color:transparent;color:#2160c4}.button.is-info{background-color:#3298dc;border-color:transparent;color:#fff}.button.is-info.is-hovered,.button.is-info:hover{background-color:#2793da;border-color:transparent;color:#fff}.button.is-info.is-focused,.button.is-info:focus{border-color:transparent;color:#fff}.button.is-info.is-focused:not(:active),.button.is-info:focus:not(:active){box-shadow:0 0 0 .125em rgba(50,152,220,.25)}.button.is-info.is-active,.button.is-info:active{background-color:#238cd1;border-color:transparent;color:#fff}.button.is-info[disabled],fieldset[disabled] .button.is-info{background-color:#3298dc;border-color:transparent;box-shadow:none}.button.is-info.is-inverted{background-color:#fff;color:#3298dc}.button.is-info.is-inverted.is-hovered,.button.is-info.is-inverted:hover{background-color:#f2f2f2}.button.is-info.is-inverted[disabled],fieldset[disabled] .button.is-info.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#3298dc}.button.is-info.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-info.is-outlined{background-color:transparent;border-color:#3298dc;color:#3298dc}.button.is-info.is-outlined.is-focused,.button.is-info.is-outlined.is-hovered,.button.is-info.is-outlined:focus,.button.is-info.is-outlined:hover{background-color:#3298dc;border-color:#3298dc;color:#fff}.button.is-info.is-outlined.is-loading::after{border-color:transparent transparent #3298dc #3298dc!important}.button.is-info.is-outlined.is-loading.is-focused::after,.button.is-info.is-outlined.is-loading.is-hovered::after,.button.is-info.is-outlined.is-loading:focus::after,.button.is-info.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-info.is-outlined[disabled],fieldset[disabled] 
.button.is-info.is-outlined{background-color:transparent;border-color:#3298dc;box-shadow:none;color:#3298dc}.button.is-info.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-info.is-inverted.is-outlined.is-focused,.button.is-info.is-inverted.is-outlined.is-hovered,.button.is-info.is-inverted.is-outlined:focus,.button.is-info.is-inverted.is-outlined:hover{background-color:#fff;color:#3298dc}.button.is-info.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-info.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-info.is-inverted.is-outlined.is-loading:focus::after,.button.is-info.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #3298dc #3298dc!important}.button.is-info.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-info.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-info.is-light{background-color:#eef6fc;color:#1d72aa}.button.is-info.is-light.is-hovered,.button.is-info.is-light:hover{background-color:#e3f1fa;border-color:transparent;color:#1d72aa}.button.is-info.is-light.is-active,.button.is-info.is-light:active{background-color:#d8ebf8;border-color:transparent;color:#1d72aa}.button.is-success{background-color:#48c774;border-color:transparent;color:#fff}.button.is-success.is-hovered,.button.is-success:hover{background-color:#3ec46d;border-color:transparent;color:#fff}.button.is-success.is-focused,.button.is-success:focus{border-color:transparent;color:#fff}.button.is-success.is-focused:not(:active),.button.is-success:focus:not(:active){box-shadow:0 0 0 .125em rgba(72,199,116,.25)}.button.is-success.is-active,.button.is-success:active{background-color:#3abb67;border-color:transparent;color:#fff}.button.is-success[disabled],fieldset[disabled] .button.is-success{background-color:#48c774;border-color:transparent;box-shadow:none}.button.is-success.is-inverted{background-color:#fff;color:#48c774}.button.is-success.is-inverted.is-hovered,.button.is-success.is-inverted:hover{background-color:#f2f2f2}.button.is-success.is-inverted[disabled],fieldset[disabled] .button.is-success.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#48c774}.button.is-success.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-success.is-outlined{background-color:transparent;border-color:#48c774;color:#48c774}.button.is-success.is-outlined.is-focused,.button.is-success.is-outlined.is-hovered,.button.is-success.is-outlined:focus,.button.is-success.is-outlined:hover{background-color:#48c774;border-color:#48c774;color:#fff}.button.is-success.is-outlined.is-loading::after{border-color:transparent transparent #48c774 #48c774!important}.button.is-success.is-outlined.is-loading.is-focused::after,.button.is-success.is-outlined.is-loading.is-hovered::after,.button.is-success.is-outlined.is-loading:focus::after,.button.is-success.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-success.is-outlined[disabled],fieldset[disabled] 
.button.is-success.is-outlined{background-color:transparent;border-color:#48c774;box-shadow:none;color:#48c774}.button.is-success.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-success.is-inverted.is-outlined.is-focused,.button.is-success.is-inverted.is-outlined.is-hovered,.button.is-success.is-inverted.is-outlined:focus,.button.is-success.is-inverted.is-outlined:hover{background-color:#fff;color:#48c774}.button.is-success.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-success.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-success.is-inverted.is-outlined.is-loading:focus::after,.button.is-success.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #48c774 #48c774!important}.button.is-success.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-success.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-success.is-light{background-color:#effaf3;color:#257942}.button.is-success.is-light.is-hovered,.button.is-success.is-light:hover{background-color:#e6f7ec;border-color:transparent;color:#257942}.button.is-success.is-light.is-active,.button.is-success.is-light:active{background-color:#dcf4e4;border-color:transparent;color:#257942}.button.is-warning{background-color:#ffdd57;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-warning.is-hovered,.button.is-warning:hover{background-color:#ffdb4a;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-warning.is-focused,.button.is-warning:focus{border-color:transparent;color:rgba(0,0,0,.7)}.button.is-warning.is-focused:not(:active),.button.is-warning:focus:not(:active){box-shadow:0 0 0 .125em rgba(255,221,87,.25)}.button.is-warning.is-active,.button.is-warning:active{background-color:#ffd83d;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-warning[disabled],fieldset[disabled] .button.is-warning{background-color:#ffdd57;border-color:transparent;box-shadow:none}.button.is-warning.is-inverted{background-color:rgba(0,0,0,.7);color:#ffdd57}.button.is-warning.is-inverted.is-hovered,.button.is-warning.is-inverted:hover{background-color:rgba(0,0,0,.7)}.button.is-warning.is-inverted[disabled],fieldset[disabled] .button.is-warning.is-inverted{background-color:rgba(0,0,0,.7);border-color:transparent;box-shadow:none;color:#ffdd57}.button.is-warning.is-loading::after{border-color:transparent transparent rgba(0,0,0,.7) rgba(0,0,0,.7)!important}.button.is-warning.is-outlined{background-color:transparent;border-color:#ffdd57;color:#ffdd57}.button.is-warning.is-outlined.is-focused,.button.is-warning.is-outlined.is-hovered,.button.is-warning.is-outlined:focus,.button.is-warning.is-outlined:hover{background-color:#ffdd57;border-color:#ffdd57;color:rgba(0,0,0,.7)}.button.is-warning.is-outlined.is-loading::after{border-color:transparent transparent #ffdd57 #ffdd57!important}.button.is-warning.is-outlined.is-loading.is-focused::after,.button.is-warning.is-outlined.is-loading.is-hovered::after,.button.is-warning.is-outlined.is-loading:focus::after,.button.is-warning.is-outlined.is-loading:hover::after{border-color:transparent transparent rgba(0,0,0,.7) rgba(0,0,0,.7)!important}.button.is-warning.is-outlined[disabled],fieldset[disabled] 
.button.is-warning.is-outlined{background-color:transparent;border-color:#ffdd57;box-shadow:none;color:#ffdd57}.button.is-warning.is-inverted.is-outlined{background-color:transparent;border-color:rgba(0,0,0,.7);color:rgba(0,0,0,.7)}.button.is-warning.is-inverted.is-outlined.is-focused,.button.is-warning.is-inverted.is-outlined.is-hovered,.button.is-warning.is-inverted.is-outlined:focus,.button.is-warning.is-inverted.is-outlined:hover{background-color:rgba(0,0,0,.7);color:#ffdd57}.button.is-warning.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-warning.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-warning.is-inverted.is-outlined.is-loading:focus::after,.button.is-warning.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #ffdd57 #ffdd57!important}.button.is-warning.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-warning.is-inverted.is-outlined{background-color:transparent;border-color:rgba(0,0,0,.7);box-shadow:none;color:rgba(0,0,0,.7)}.button.is-warning.is-light{background-color:#fffbeb;color:#947600}.button.is-warning.is-light.is-hovered,.button.is-warning.is-light:hover{background-color:#fff8de;border-color:transparent;color:#947600}.button.is-warning.is-light.is-active,.button.is-warning.is-light:active{background-color:#fff6d1;border-color:transparent;color:#947600}.button.is-danger{background-color:#f14668;border-color:transparent;color:#fff}.button.is-danger.is-hovered,.button.is-danger:hover{background-color:#f03a5f;border-color:transparent;color:#fff}.button.is-danger.is-focused,.button.is-danger:focus{border-color:transparent;color:#fff}.button.is-danger.is-focused:not(:active),.button.is-danger:focus:not(:active){box-shadow:0 0 0 .125em rgba(241,70,104,.25)}.button.is-danger.is-active,.button.is-danger:active{background-color:#ef2e55;border-color:transparent;color:#fff}.button.is-danger[disabled],fieldset[disabled] .button.is-danger{background-color:#f14668;border-color:transparent;box-shadow:none}.button.is-danger.is-inverted{background-color:#fff;color:#f14668}.button.is-danger.is-inverted.is-hovered,.button.is-danger.is-inverted:hover{background-color:#f2f2f2}.button.is-danger.is-inverted[disabled],fieldset[disabled] .button.is-danger.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#f14668}.button.is-danger.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-danger.is-outlined{background-color:transparent;border-color:#f14668;color:#f14668}.button.is-danger.is-outlined.is-focused,.button.is-danger.is-outlined.is-hovered,.button.is-danger.is-outlined:focus,.button.is-danger.is-outlined:hover{background-color:#f14668;border-color:#f14668;color:#fff}.button.is-danger.is-outlined.is-loading::after{border-color:transparent transparent #f14668 #f14668!important}.button.is-danger.is-outlined.is-loading.is-focused::after,.button.is-danger.is-outlined.is-loading.is-hovered::after,.button.is-danger.is-outlined.is-loading:focus::after,.button.is-danger.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-danger.is-outlined[disabled],fieldset[disabled] 
.button.is-danger.is-outlined{background-color:transparent;border-color:#f14668;box-shadow:none;color:#f14668}.button.is-danger.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-danger.is-inverted.is-outlined.is-focused,.button.is-danger.is-inverted.is-outlined.is-hovered,.button.is-danger.is-inverted.is-outlined:focus,.button.is-danger.is-inverted.is-outlined:hover{background-color:#fff;color:#f14668}.button.is-danger.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-danger.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-danger.is-inverted.is-outlined.is-loading:focus::after,.button.is-danger.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #f14668 #f14668!important}.button.is-danger.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-danger.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-danger.is-light{background-color:#feecf0;color:#cc0f35}.button.is-danger.is-light.is-hovered,.button.is-danger.is-light:hover{background-color:#fde0e6;border-color:transparent;color:#cc0f35}.button.is-danger.is-light.is-active,.button.is-danger.is-light:active{background-color:#fcd4dc;border-color:transparent;color:#cc0f35}.button.is-small{border-radius:2px;font-size:.75rem}.button.is-normal{font-size:1rem}.button.is-medium{font-size:1.25rem}.button.is-large{font-size:1.5rem}.button[disabled],fieldset[disabled] .button{background-color:#fff;border-color:#dbdbdb;box-shadow:none;opacity:.5}.button.is-fullwidth{display:flex;width:100%}.button.is-loading{color:transparent!important;pointer-events:none}.button.is-loading::after{position:absolute;left:calc(50% - (1em / 2));top:calc(50% - (1em / 2));position:absolute!important}.button.is-static{background-color:#f5f5f5;border-color:#dbdbdb;color:#7a7a7a;box-shadow:none;pointer-events:none}.button.is-rounded{border-radius:290486px;padding-left:calc(1em + .25em);padding-right:calc(1em + .25em)}.buttons{align-items:center;display:flex;flex-wrap:wrap;justify-content:flex-start}.buttons .button{margin-bottom:.5rem}.buttons .button:not(:last-child):not(.is-fullwidth){margin-right:.5rem}.buttons:last-child{margin-bottom:-.5rem}.buttons:not(:last-child){margin-bottom:1rem}.buttons.are-small .button:not(.is-normal):not(.is-medium):not(.is-large){border-radius:2px;font-size:.75rem}.buttons.are-medium .button:not(.is-small):not(.is-normal):not(.is-large){font-size:1.25rem}.buttons.are-large .button:not(.is-small):not(.is-normal):not(.is-medium){font-size:1.5rem}.buttons.has-addons .button:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.buttons.has-addons .button:not(:last-child){border-bottom-right-radius:0;border-top-right-radius:0;margin-right:-1px}.buttons.has-addons .button:last-child{margin-right:0}.buttons.has-addons .button.is-hovered,.buttons.has-addons .button:hover{z-index:2}.buttons.has-addons .button.is-active,.buttons.has-addons .button.is-focused,.buttons.has-addons .button.is-selected,.buttons.has-addons .button:active,.buttons.has-addons .button:focus{z-index:3}.buttons.has-addons .button.is-active:hover,.buttons.has-addons .button.is-focused:hover,.buttons.has-addons .button.is-selected:hover,.buttons.has-addons .button:active:hover,.buttons.has-addons .button:focus:hover{z-index:4}.buttons.has-addons .button.is-expanded{flex-grow:1;flex-shrink:1}.buttons.is-centered{justify-content:center}.buttons.is-centered:not(.has-addons) 
.button:not(.is-fullwidth){margin-left:.25rem;margin-right:.25rem}.buttons.is-right{justify-content:flex-end}.buttons.is-right:not(.has-addons) .button:not(.is-fullwidth){margin-left:.25rem;margin-right:.25rem}.container{flex-grow:1;margin:0 auto;position:relative;width:auto}.container.is-fluid{max-width:none!important;padding-left:32px;padding-right:32px;width:100%}@media screen and (min-width:1024px){.container{max-width:960px}}@media screen and (max-width:1215px){.container.is-widescreen:not(.is-max-desktop){max-width:1152px}}@media screen and (max-width:1407px){.container.is-fullhd:not(.is-max-desktop):not(.is-max-widescreen){max-width:1344px}}@media screen and (min-width:1216px){.container:not(.is-max-desktop){max-width:1152px}}@media screen and (min-width:1408px){.container:not(.is-max-desktop):not(.is-max-widescreen){max-width:1344px}}.content li+li{margin-top:.25em}.content blockquote:not(:last-child),.content dl:not(:last-child),.content ol:not(:last-child),.content p:not(:last-child),.content pre:not(:last-child),.content table:not(:last-child),.content ul:not(:last-child){margin-bottom:1em}.content h1,.content h2,.content h3,.content h4,.content h5,.content h6{color:#363636;font-weight:600;line-height:1.125}.content h1{font-size:2em;margin-bottom:.5em}.content h1:not(:first-child){margin-top:1em}.content h2{font-size:1.75em;margin-bottom:.5714em}.content h2:not(:first-child){margin-top:1.1428em}.content h3{font-size:1.5em;margin-bottom:.6666em}.content h3:not(:first-child){margin-top:1.3333em}.content h4{font-size:1.25em;margin-bottom:.8em}.content h5{font-size:1.125em;margin-bottom:.8888em}.content h6{font-size:1em;margin-bottom:1em}.content blockquote{background-color:#f5f5f5;border-left:5px solid #dbdbdb;padding:1.25em 1.5em}.content ol{list-style-position:outside;margin-left:2em;margin-top:1em}.content ol:not([type]){list-style-type:decimal}.content ol:not([type]).is-lower-alpha{list-style-type:lower-alpha}.content ol:not([type]).is-lower-roman{list-style-type:lower-roman}.content ol:not([type]).is-upper-alpha{list-style-type:upper-alpha}.content ol:not([type]).is-upper-roman{list-style-type:upper-roman}.content ul{list-style:disc outside;margin-left:2em;margin-top:1em}.content ul ul{list-style-type:circle;margin-top:.5em}.content ul ul ul{list-style-type:square}.content dd{margin-left:2em}.content figure{margin-left:2em;margin-right:2em;text-align:center}.content figure:not(:first-child){margin-top:2em}.content figure:not(:last-child){margin-bottom:2em}.content figure img{display:inline-block}.content figure figcaption{font-style:italic}.content pre{-webkit-overflow-scrolling:touch;overflow-x:auto;padding:1.25em 1.5em;white-space:pre;word-wrap:normal}.content sub,.content sup{font-size:75%}.content table{width:100%}.content table td,.content table th{border:1px solid #dbdbdb;border-width:0 0 1px;padding:.5em .75em;vertical-align:top}.content table th{color:#363636}.content table th:not([align]){text-align:inherit}.content table thead td,.content table thead th{border-width:0 0 2px;color:#363636}.content table tfoot td,.content table tfoot th{border-width:2px 0 0;color:#363636}.content table tbody tr:last-child td,.content table tbody tr:last-child th{border-bottom-width:0}.content .tabs 
li+li{margin-top:0}.content.is-small{font-size:.75rem}.content.is-medium{font-size:1.25rem}.content.is-large{font-size:1.5rem}.icon{align-items:center;display:inline-flex;justify-content:center;height:1.5rem;width:1.5rem}.icon.is-small{height:1rem;width:1rem}.icon.is-medium{height:2rem;width:2rem}.icon.is-large{height:3rem;width:3rem}.image{display:block;position:relative}.image img{display:block;height:auto;width:100%}.image img.is-rounded{border-radius:290486px}.image.is-fullwidth{width:100%}.image.is-16by9 .has-ratio,.image.is-16by9 img,.image.is-1by1 .has-ratio,.image.is-1by1 img,.image.is-1by2 .has-ratio,.image.is-1by2 img,.image.is-1by3 .has-ratio,.image.is-1by3 img,.image.is-2by1 .has-ratio,.image.is-2by1 img,.image.is-2by3 .has-ratio,.image.is-2by3 img,.image.is-3by1 .has-ratio,.image.is-3by1 img,.image.is-3by2 .has-ratio,.image.is-3by2 img,.image.is-3by4 .has-ratio,.image.is-3by4 img,.image.is-3by5 .has-ratio,.image.is-3by5 img,.image.is-4by3 .has-ratio,.image.is-4by3 img,.image.is-4by5 .has-ratio,.image.is-4by5 img,.image.is-5by3 .has-ratio,.image.is-5by3 img,.image.is-5by4 .has-ratio,.image.is-5by4 img,.image.is-9by16 .has-ratio,.image.is-9by16 img,.image.is-square .has-ratio,.image.is-square img{height:100%;width:100%}.image.is-1by1,.image.is-square{padding-top:100%}.image.is-5by4{padding-top:80%}.image.is-4by3{padding-top:75%}.image.is-3by2{padding-top:66.6666%}.image.is-5by3{padding-top:60%}.image.is-16by9{padding-top:56.25%}.image.is-2by1{padding-top:50%}.image.is-3by1{padding-top:33.3333%}.image.is-4by5{padding-top:125%}.image.is-3by4{padding-top:133.3333%}.image.is-2by3{padding-top:150%}.image.is-3by5{padding-top:166.6666%}.image.is-9by16{padding-top:177.7777%}.image.is-1by2{padding-top:200%}.image.is-1by3{padding-top:300%}.image.is-16x16{height:16px;width:16px}.image.is-24x24{height:24px;width:24px}.image.is-32x32{height:32px;width:32px}.image.is-48x48{height:48px;width:48px}.image.is-64x64{height:64px;width:64px}.image.is-96x96{height:96px;width:96px}.image.is-128x128{height:128px;width:128px}.notification{background-color:#f5f5f5;border-radius:4px;position:relative;padding:1.25rem 2.5rem 1.25rem 1.5rem}.notification a:not(.button):not(.dropdown-item){color:currentColor;text-decoration:underline}.notification strong{color:currentColor}.notification code,.notification pre{background:#fff}.notification pre code{background:0 0}.notification>.delete{right:.5rem;position:absolute;top:.5rem}.notification .content,.notification .subtitle,.notification 
.title{color:currentColor}.notification.is-white{background-color:#fff;color:#0a0a0a}.notification.is-black{background-color:#0a0a0a;color:#fff}.notification.is-light{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.notification.is-dark{background-color:#363636;color:#fff}.notification.is-primary{background-color:#00d1b2;color:#fff}.notification.is-primary.is-light{background-color:#ebfffc;color:#00947e}.notification.is-link{background-color:#3273dc;color:#fff}.notification.is-link.is-light{background-color:#eef3fc;color:#2160c4}.notification.is-info{background-color:#3298dc;color:#fff}.notification.is-info.is-light{background-color:#eef6fc;color:#1d72aa}.notification.is-success{background-color:#48c774;color:#fff}.notification.is-success.is-light{background-color:#effaf3;color:#257942}.notification.is-warning{background-color:#ffdd57;color:rgba(0,0,0,.7)}.notification.is-warning.is-light{background-color:#fffbeb;color:#947600}.notification.is-danger{background-color:#f14668;color:#fff}.notification.is-danger.is-light{background-color:#feecf0;color:#cc0f35}.progress{-moz-appearance:none;-webkit-appearance:none;border:none;border-radius:290486px;display:block;height:1rem;overflow:hidden;padding:0;width:100%}.progress::-webkit-progress-bar{background-color:#ededed}.progress::-webkit-progress-value{background-color:#4a4a4a}.progress::-moz-progress-bar{background-color:#4a4a4a}.progress::-ms-fill{background-color:#4a4a4a;border:none}.progress.is-white::-webkit-progress-value{background-color:#fff}.progress.is-white::-moz-progress-bar{background-color:#fff}.progress.is-white::-ms-fill{background-color:#fff}.progress.is-white:indeterminate{background-image:linear-gradient(to right,#fff 30%,#ededed 30%)}.progress.is-black::-webkit-progress-value{background-color:#0a0a0a}.progress.is-black::-moz-progress-bar{background-color:#0a0a0a}.progress.is-black::-ms-fill{background-color:#0a0a0a}.progress.is-black:indeterminate{background-image:linear-gradient(to right,#0a0a0a 30%,#ededed 30%)}.progress.is-light::-webkit-progress-value{background-color:#f5f5f5}.progress.is-light::-moz-progress-bar{background-color:#f5f5f5}.progress.is-light::-ms-fill{background-color:#f5f5f5}.progress.is-light:indeterminate{background-image:linear-gradient(to right,#f5f5f5 30%,#ededed 30%)}.progress.is-dark::-webkit-progress-value{background-color:#363636}.progress.is-dark::-moz-progress-bar{background-color:#363636}.progress.is-dark::-ms-fill{background-color:#363636}.progress.is-dark:indeterminate{background-image:linear-gradient(to right,#363636 30%,#ededed 30%)}.progress.is-primary::-webkit-progress-value{background-color:#00d1b2}.progress.is-primary::-moz-progress-bar{background-color:#00d1b2}.progress.is-primary::-ms-fill{background-color:#00d1b2}.progress.is-primary:indeterminate{background-image:linear-gradient(to right,#00d1b2 30%,#ededed 30%)}.progress.is-link::-webkit-progress-value{background-color:#3273dc}.progress.is-link::-moz-progress-bar{background-color:#3273dc}.progress.is-link::-ms-fill{background-color:#3273dc}.progress.is-link:indeterminate{background-image:linear-gradient(to right,#3273dc 30%,#ededed 30%)}.progress.is-info::-webkit-progress-value{background-color:#3298dc}.progress.is-info::-moz-progress-bar{background-color:#3298dc}.progress.is-info::-ms-fill{background-color:#3298dc}.progress.is-info:indeterminate{background-image:linear-gradient(to right,#3298dc 30%,#ededed 
30%)}.progress.is-success::-webkit-progress-value{background-color:#48c774}.progress.is-success::-moz-progress-bar{background-color:#48c774}.progress.is-success::-ms-fill{background-color:#48c774}.progress.is-success:indeterminate{background-image:linear-gradient(to right,#48c774 30%,#ededed 30%)}.progress.is-warning::-webkit-progress-value{background-color:#ffdd57}.progress.is-warning::-moz-progress-bar{background-color:#ffdd57}.progress.is-warning::-ms-fill{background-color:#ffdd57}.progress.is-warning:indeterminate{background-image:linear-gradient(to right,#ffdd57 30%,#ededed 30%)}.progress.is-danger::-webkit-progress-value{background-color:#f14668}.progress.is-danger::-moz-progress-bar{background-color:#f14668}.progress.is-danger::-ms-fill{background-color:#f14668}.progress.is-danger:indeterminate{background-image:linear-gradient(to right,#f14668 30%,#ededed 30%)}.progress:indeterminate{-webkit-animation-duration:1.5s;animation-duration:1.5s;-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite;-webkit-animation-name:moveIndeterminate;animation-name:moveIndeterminate;-webkit-animation-timing-function:linear;animation-timing-function:linear;background-color:#ededed;background-image:linear-gradient(to right,#4a4a4a 30%,#ededed 30%);background-position:top left;background-repeat:no-repeat;background-size:150% 150%}.progress:indeterminate::-webkit-progress-bar{background-color:transparent}.progress:indeterminate::-moz-progress-bar{background-color:transparent}.progress:indeterminate::-ms-fill{animation-name:none}.progress.is-small{height:.75rem}.progress.is-medium{height:1.25rem}.progress.is-large{height:1.5rem}@-webkit-keyframes moveIndeterminate{from{background-position:200% 0}to{background-position:-200% 0}}@keyframes moveIndeterminate{from{background-position:200% 0}to{background-position:-200% 0}}.table{background-color:#fff;color:#363636}.table td,.table th{border:1px solid #dbdbdb;border-width:0 0 1px;padding:.5em .75em;vertical-align:top}.table td.is-white,.table th.is-white{background-color:#fff;border-color:#fff;color:#0a0a0a}.table td.is-black,.table th.is-black{background-color:#0a0a0a;border-color:#0a0a0a;color:#fff}.table td.is-light,.table th.is-light{background-color:#f5f5f5;border-color:#f5f5f5;color:rgba(0,0,0,.7)}.table td.is-dark,.table th.is-dark{background-color:#363636;border-color:#363636;color:#fff}.table td.is-primary,.table th.is-primary{background-color:#00d1b2;border-color:#00d1b2;color:#fff}.table td.is-link,.table th.is-link{background-color:#3273dc;border-color:#3273dc;color:#fff}.table td.is-info,.table th.is-info{background-color:#3298dc;border-color:#3298dc;color:#fff}.table td.is-success,.table th.is-success{background-color:#48c774;border-color:#48c774;color:#fff}.table td.is-warning,.table th.is-warning{background-color:#ffdd57;border-color:#ffdd57;color:rgba(0,0,0,.7)}.table td.is-danger,.table th.is-danger{background-color:#f14668;border-color:#f14668;color:#fff}.table td.is-narrow,.table th.is-narrow{white-space:nowrap;width:1%}.table td.is-selected,.table th.is-selected{background-color:#00d1b2;color:#fff}.table td.is-selected a,.table td.is-selected strong,.table th.is-selected a,.table th.is-selected strong{color:currentColor}.table td.is-vcentered,.table th.is-vcentered{vertical-align:middle}.table th{color:#363636}.table th:not([align]){text-align:inherit}.table tr.is-selected{background-color:#00d1b2;color:#fff}.table tr.is-selected a,.table tr.is-selected strong{color:currentColor}.table tr.is-selected td,.table 
tr.is-selected th{border-color:#fff;color:currentColor}.table thead{background-color:transparent}.table thead td,.table thead th{border-width:0 0 2px;color:#363636}.table tfoot{background-color:transparent}.table tfoot td,.table tfoot th{border-width:2px 0 0;color:#363636}.table tbody{background-color:transparent}.table tbody tr:last-child td,.table tbody tr:last-child th{border-bottom-width:0}.table.is-bordered td,.table.is-bordered th{border-width:1px}.table.is-bordered tr:last-child td,.table.is-bordered tr:last-child th{border-bottom-width:1px}.table.is-fullwidth{width:100%}.table.is-hoverable tbody tr:not(.is-selected):hover{background-color:#fafafa}.table.is-hoverable.is-striped tbody tr:not(.is-selected):hover{background-color:#fafafa}.table.is-hoverable.is-striped tbody tr:not(.is-selected):hover:nth-child(even){background-color:#f5f5f5}.table.is-narrow td,.table.is-narrow th{padding:.25em .5em}.table.is-striped tbody tr:not(.is-selected):nth-child(even){background-color:#fafafa}.table-container{-webkit-overflow-scrolling:touch;overflow:auto;overflow-y:hidden;max-width:100%}.tags{align-items:center;display:flex;flex-wrap:wrap;justify-content:flex-start}.tags .tag{margin-bottom:.5rem}.tags .tag:not(:last-child){margin-right:.5rem}.tags:last-child{margin-bottom:-.5rem}.tags:not(:last-child){margin-bottom:1rem}.tags.are-medium .tag:not(.is-normal):not(.is-large){font-size:1rem}.tags.are-large .tag:not(.is-normal):not(.is-medium){font-size:1.25rem}.tags.is-centered{justify-content:center}.tags.is-centered .tag{margin-right:.25rem;margin-left:.25rem}.tags.is-right{justify-content:flex-end}.tags.is-right .tag:not(:first-child){margin-left:.5rem}.tags.is-right .tag:not(:last-child){margin-right:0}.tags.has-addons .tag{margin-right:0}.tags.has-addons .tag:not(:first-child){margin-left:0;border-top-left-radius:0;border-bottom-left-radius:0}.tags.has-addons .tag:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.tag:not(body){align-items:center;background-color:#f5f5f5;border-radius:4px;color:#4a4a4a;display:inline-flex;font-size:.75rem;height:2em;justify-content:center;line-height:1.5;padding-left:.75em;padding-right:.75em;white-space:nowrap}.tag:not(body) .delete{margin-left:.25rem;margin-right:-.375rem}.tag:not(body).is-white{background-color:#fff;color:#0a0a0a}.tag:not(body).is-black{background-color:#0a0a0a;color:#fff}.tag:not(body).is-light{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.tag:not(body).is-dark{background-color:#363636;color:#fff}.tag:not(body).is-primary{background-color:#00d1b2;color:#fff}.tag:not(body).is-primary.is-light{background-color:#ebfffc;color:#00947e}.tag:not(body).is-link{background-color:#3273dc;color:#fff}.tag:not(body).is-link.is-light{background-color:#eef3fc;color:#2160c4}.tag:not(body).is-info{background-color:#3298dc;color:#fff}.tag:not(body).is-info.is-light{background-color:#eef6fc;color:#1d72aa}.tag:not(body).is-success{background-color:#48c774;color:#fff}.tag:not(body).is-success.is-light{background-color:#effaf3;color:#257942}.tag:not(body).is-warning{background-color:#ffdd57;color:rgba(0,0,0,.7)}.tag:not(body).is-warning.is-light{background-color:#fffbeb;color:#947600}.tag:not(body).is-danger{background-color:#f14668;color:#fff}.tag:not(body).is-danger.is-light{background-color:#feecf0;color:#cc0f35}.tag:not(body).is-normal{font-size:.75rem}.tag:not(body).is-medium{font-size:1rem}.tag:not(body).is-large{font-size:1.25rem}.tag:not(body) 
.icon:first-child:not(:last-child){margin-left:-.375em;margin-right:.1875em}.tag:not(body) .icon:last-child:not(:first-child){margin-left:.1875em;margin-right:-.375em}.tag:not(body) .icon:first-child:last-child{margin-left:-.375em;margin-right:-.375em}.tag:not(body).is-delete{margin-left:1px;padding:0;position:relative;width:2em}.tag:not(body).is-delete::after,.tag:not(body).is-delete::before{background-color:currentColor;content:"";display:block;left:50%;position:absolute;top:50%;transform:translateX(-50%) translateY(-50%) rotate(45deg);transform-origin:center center}.tag:not(body).is-delete::before{height:1px;width:50%}.tag:not(body).is-delete::after{height:50%;width:1px}.tag:not(body).is-delete:focus,.tag:not(body).is-delete:hover{background-color:#e8e8e8}.tag:not(body).is-delete:active{background-color:#dbdbdb}.tag:not(body).is-rounded{border-radius:290486px}a.tag:hover{text-decoration:underline}.subtitle,.title{word-break:break-word}.subtitle em,.subtitle span,.title em,.title span{font-weight:inherit}.subtitle sub,.title sub{font-size:.75em}.subtitle sup,.title sup{font-size:.75em}.subtitle .tag,.title .tag{vertical-align:middle}.title{color:#363636;font-size:2rem;font-weight:600;line-height:1.125}.title strong{color:inherit;font-weight:inherit}.title+.highlight{margin-top:-.75rem}.title:not(.is-spaced)+.subtitle{margin-top:-1.25rem}.title.is-1{font-size:3rem}.title.is-2{font-size:2.5rem}.title.is-3{font-size:2rem}.title.is-4{font-size:1.5rem}.title.is-5{font-size:1.25rem}.title.is-6{font-size:1rem}.title.is-7{font-size:.75rem}.subtitle{color:#4a4a4a;font-size:1.25rem;font-weight:400;line-height:1.25}.subtitle strong{color:#363636;font-weight:600}.subtitle:not(.is-spaced)+.title{margin-top:-1.25rem}.subtitle.is-1{font-size:3rem}.subtitle.is-2{font-size:2.5rem}.subtitle.is-3{font-size:2rem}.subtitle.is-4{font-size:1.5rem}.subtitle.is-5{font-size:1.25rem}.subtitle.is-6{font-size:1rem}.subtitle.is-7{font-size:.75rem}.heading{display:block;font-size:11px;letter-spacing:1px;margin-bottom:5px;text-transform:uppercase}.highlight{font-weight:400;max-width:100%;overflow:hidden;padding:0}.highlight pre{overflow:auto;max-width:100%}.number{align-items:center;background-color:#f5f5f5;border-radius:290486px;display:inline-flex;font-size:1.25rem;height:2em;justify-content:center;margin-right:1.5rem;min-width:2.5em;padding:.25rem .5rem;text-align:center;vertical-align:top}.input,.select select,.textarea{background-color:#fff;border-color:#dbdbdb;border-radius:4px;color:#363636}.input::-moz-placeholder,.select select::-moz-placeholder,.textarea::-moz-placeholder{color:rgba(54,54,54,.3)}.input::-webkit-input-placeholder,.select select::-webkit-input-placeholder,.textarea::-webkit-input-placeholder{color:rgba(54,54,54,.3)}.input:-moz-placeholder,.select select:-moz-placeholder,.textarea:-moz-placeholder{color:rgba(54,54,54,.3)}.input:-ms-input-placeholder,.select select:-ms-input-placeholder,.textarea:-ms-input-placeholder{color:rgba(54,54,54,.3)}.input:hover,.is-hovered.input,.is-hovered.textarea,.select select.is-hovered,.select select:hover,.textarea:hover{border-color:#b5b5b5}.input:active,.input:focus,.is-active.input,.is-active.textarea,.is-focused.input,.is-focused.textarea,.select select.is-active,.select select.is-focused,.select select:active,.select select:focus,.textarea:active,.textarea:focus{border-color:#3273dc;box-shadow:0 0 0 .125em rgba(50,115,220,.25)}.input[disabled],.select fieldset[disabled] select,.select select[disabled],.textarea[disabled],fieldset[disabled] 
.input,fieldset[disabled] .select select,fieldset[disabled] .textarea{background-color:#f5f5f5;border-color:#f5f5f5;box-shadow:none;color:#7a7a7a}.input[disabled]::-moz-placeholder,.select fieldset[disabled] select::-moz-placeholder,.select select[disabled]::-moz-placeholder,.textarea[disabled]::-moz-placeholder,fieldset[disabled] .input::-moz-placeholder,fieldset[disabled] .select select::-moz-placeholder,fieldset[disabled] .textarea::-moz-placeholder{color:rgba(122,122,122,.3)}.input[disabled]::-webkit-input-placeholder,.select fieldset[disabled] select::-webkit-input-placeholder,.select select[disabled]::-webkit-input-placeholder,.textarea[disabled]::-webkit-input-placeholder,fieldset[disabled] .input::-webkit-input-placeholder,fieldset[disabled] .select select::-webkit-input-placeholder,fieldset[disabled] .textarea::-webkit-input-placeholder{color:rgba(122,122,122,.3)}.input[disabled]:-moz-placeholder,.select fieldset[disabled] select:-moz-placeholder,.select select[disabled]:-moz-placeholder,.textarea[disabled]:-moz-placeholder,fieldset[disabled] .input:-moz-placeholder,fieldset[disabled] .select select:-moz-placeholder,fieldset[disabled] .textarea:-moz-placeholder{color:rgba(122,122,122,.3)}.input[disabled]:-ms-input-placeholder,.select fieldset[disabled] select:-ms-input-placeholder,.select select[disabled]:-ms-input-placeholder,.textarea[disabled]:-ms-input-placeholder,fieldset[disabled] .input:-ms-input-placeholder,fieldset[disabled] .select select:-ms-input-placeholder,fieldset[disabled] .textarea:-ms-input-placeholder{color:rgba(122,122,122,.3)}.input,.textarea{box-shadow:inset 0 .0625em .125em rgba(10,10,10,.05);max-width:100%;width:100%}.input[readonly],.textarea[readonly]{box-shadow:none}.is-white.input,.is-white.textarea{border-color:#fff}.is-white.input:active,.is-white.input:focus,.is-white.is-active.input,.is-white.is-active.textarea,.is-white.is-focused.input,.is-white.is-focused.textarea,.is-white.textarea:active,.is-white.textarea:focus{box-shadow:0 0 0 .125em rgba(255,255,255,.25)}.is-black.input,.is-black.textarea{border-color:#0a0a0a}.is-black.input:active,.is-black.input:focus,.is-black.is-active.input,.is-black.is-active.textarea,.is-black.is-focused.input,.is-black.is-focused.textarea,.is-black.textarea:active,.is-black.textarea:focus{box-shadow:0 0 0 .125em rgba(10,10,10,.25)}.is-light.input,.is-light.textarea{border-color:#f5f5f5}.is-light.input:active,.is-light.input:focus,.is-light.is-active.input,.is-light.is-active.textarea,.is-light.is-focused.input,.is-light.is-focused.textarea,.is-light.textarea:active,.is-light.textarea:focus{box-shadow:0 0 0 .125em rgba(245,245,245,.25)}.is-dark.input,.is-dark.textarea{border-color:#363636}.is-dark.input:active,.is-dark.input:focus,.is-dark.is-active.input,.is-dark.is-active.textarea,.is-dark.is-focused.input,.is-dark.is-focused.textarea,.is-dark.textarea:active,.is-dark.textarea:focus{box-shadow:0 0 0 .125em rgba(54,54,54,.25)}.is-primary.input,.is-primary.textarea{border-color:#00d1b2}.is-primary.input:active,.is-primary.input:focus,.is-primary.is-active.input,.is-primary.is-active.textarea,.is-primary.is-focused.input,.is-primary.is-focused.textarea,.is-primary.textarea:active,.is-primary.textarea:focus{box-shadow:0 0 0 .125em rgba(0,209,178,.25)}.is-link.input,.is-link.textarea{border-color:#3273dc}.is-link.input:active,.is-link.input:focus,.is-link.is-active.input,.is-link.is-active.textarea,.is-link.is-focused.input,.is-link.is-focused.textarea,.is-link.textarea:active,.is-link.textarea:focus{box-shadow:0 0 0 
.125em rgba(50,115,220,.25)}.is-info.input,.is-info.textarea{border-color:#3298dc}.is-info.input:active,.is-info.input:focus,.is-info.is-active.input,.is-info.is-active.textarea,.is-info.is-focused.input,.is-info.is-focused.textarea,.is-info.textarea:active,.is-info.textarea:focus{box-shadow:0 0 0 .125em rgba(50,152,220,.25)}.is-success.input,.is-success.textarea{border-color:#48c774}.is-success.input:active,.is-success.input:focus,.is-success.is-active.input,.is-success.is-active.textarea,.is-success.is-focused.input,.is-success.is-focused.textarea,.is-success.textarea:active,.is-success.textarea:focus{box-shadow:0 0 0 .125em rgba(72,199,116,.25)}.is-warning.input,.is-warning.textarea{border-color:#ffdd57}.is-warning.input:active,.is-warning.input:focus,.is-warning.is-active.input,.is-warning.is-active.textarea,.is-warning.is-focused.input,.is-warning.is-focused.textarea,.is-warning.textarea:active,.is-warning.textarea:focus{box-shadow:0 0 0 .125em rgba(255,221,87,.25)}.is-danger.input,.is-danger.textarea{border-color:#f14668}.is-danger.input:active,.is-danger.input:focus,.is-danger.is-active.input,.is-danger.is-active.textarea,.is-danger.is-focused.input,.is-danger.is-focused.textarea,.is-danger.textarea:active,.is-danger.textarea:focus{box-shadow:0 0 0 .125em rgba(241,70,104,.25)}.is-small.input,.is-small.textarea{border-radius:2px;font-size:.75rem}.is-medium.input,.is-medium.textarea{font-size:1.25rem}.is-large.input,.is-large.textarea{font-size:1.5rem}.is-fullwidth.input,.is-fullwidth.textarea{display:block;width:100%}.is-inline.input,.is-inline.textarea{display:inline;width:auto}.input.is-rounded{border-radius:290486px;padding-left:calc(calc(.75em - 1px) + .375em);padding-right:calc(calc(.75em - 1px) + .375em)}.input.is-static{background-color:transparent;border-color:transparent;box-shadow:none;padding-left:0;padding-right:0}.textarea{display:block;max-width:100%;min-width:100%;padding:calc(.75em - 1px);resize:vertical}.textarea:not([rows]){max-height:40em;min-height:8em}.textarea[rows]{height:initial}.textarea.has-fixed-size{resize:none}.checkbox,.radio{cursor:pointer;display:inline-block;line-height:1.25;position:relative}.checkbox input,.radio input{cursor:pointer}.checkbox:hover,.radio:hover{color:#363636}.checkbox input[disabled],.checkbox[disabled],.radio input[disabled],.radio[disabled],fieldset[disabled] .checkbox,fieldset[disabled] .radio{color:#7a7a7a;cursor:not-allowed}.radio+.radio{margin-left:.5em}.select{display:inline-block;max-width:100%;position:relative;vertical-align:top}.select:not(.is-multiple){height:2.5em}.select:not(.is-multiple):not(.is-loading)::after{border-color:#3273dc;right:1.125em;z-index:4}.select.is-rounded select{border-radius:290486px;padding-left:1em}.select select{cursor:pointer;display:block;font-size:1em;max-width:100%;outline:0}.select select::-ms-expand{display:none}.select select[disabled]:hover,fieldset[disabled] .select select:hover{border-color:#f5f5f5}.select select:not([multiple]){padding-right:2.5em}.select select[multiple]{height:auto;padding:0}.select select[multiple] option{padding:.5em 1em}.select:not(.is-multiple):not(.is-loading):hover::after{border-color:#363636}.select.is-white:not(:hover)::after{border-color:#fff}.select.is-white select{border-color:#fff}.select.is-white select.is-hovered,.select.is-white select:hover{border-color:#f2f2f2}.select.is-white select.is-active,.select.is-white select.is-focused,.select.is-white select:active,.select.is-white select:focus{box-shadow:0 0 0 .125em 
rgba(255,255,255,.25)}.select.is-black:not(:hover)::after{border-color:#0a0a0a}.select.is-black select{border-color:#0a0a0a}.select.is-black select.is-hovered,.select.is-black select:hover{border-color:#000}.select.is-black select.is-active,.select.is-black select.is-focused,.select.is-black select:active,.select.is-black select:focus{box-shadow:0 0 0 .125em rgba(10,10,10,.25)}.select.is-light:not(:hover)::after{border-color:#f5f5f5}.select.is-light select{border-color:#f5f5f5}.select.is-light select.is-hovered,.select.is-light select:hover{border-color:#e8e8e8}.select.is-light select.is-active,.select.is-light select.is-focused,.select.is-light select:active,.select.is-light select:focus{box-shadow:0 0 0 .125em rgba(245,245,245,.25)}.select.is-dark:not(:hover)::after{border-color:#363636}.select.is-dark select{border-color:#363636}.select.is-dark select.is-hovered,.select.is-dark select:hover{border-color:#292929}.select.is-dark select.is-active,.select.is-dark select.is-focused,.select.is-dark select:active,.select.is-dark select:focus{box-shadow:0 0 0 .125em rgba(54,54,54,.25)}.select.is-primary:not(:hover)::after{border-color:#00d1b2}.select.is-primary select{border-color:#00d1b2}.select.is-primary select.is-hovered,.select.is-primary select:hover{border-color:#00b89c}.select.is-primary select.is-active,.select.is-primary select.is-focused,.select.is-primary select:active,.select.is-primary select:focus{box-shadow:0 0 0 .125em rgba(0,209,178,.25)}.select.is-link:not(:hover)::after{border-color:#3273dc}.select.is-link select{border-color:#3273dc}.select.is-link select.is-hovered,.select.is-link select:hover{border-color:#2366d1}.select.is-link select.is-active,.select.is-link select.is-focused,.select.is-link select:active,.select.is-link select:focus{box-shadow:0 0 0 .125em rgba(50,115,220,.25)}.select.is-info:not(:hover)::after{border-color:#3298dc}.select.is-info select{border-color:#3298dc}.select.is-info select.is-hovered,.select.is-info select:hover{border-color:#238cd1}.select.is-info select.is-active,.select.is-info select.is-focused,.select.is-info select:active,.select.is-info select:focus{box-shadow:0 0 0 .125em rgba(50,152,220,.25)}.select.is-success:not(:hover)::after{border-color:#48c774}.select.is-success select{border-color:#48c774}.select.is-success select.is-hovered,.select.is-success select:hover{border-color:#3abb67}.select.is-success select.is-active,.select.is-success select.is-focused,.select.is-success select:active,.select.is-success select:focus{box-shadow:0 0 0 .125em rgba(72,199,116,.25)}.select.is-warning:not(:hover)::after{border-color:#ffdd57}.select.is-warning select{border-color:#ffdd57}.select.is-warning select.is-hovered,.select.is-warning select:hover{border-color:#ffd83d}.select.is-warning select.is-active,.select.is-warning select.is-focused,.select.is-warning select:active,.select.is-warning select:focus{box-shadow:0 0 0 .125em rgba(255,221,87,.25)}.select.is-danger:not(:hover)::after{border-color:#f14668}.select.is-danger select{border-color:#f14668}.select.is-danger select.is-hovered,.select.is-danger select:hover{border-color:#ef2e55}.select.is-danger select.is-active,.select.is-danger select.is-focused,.select.is-danger select:active,.select.is-danger select:focus{box-shadow:0 0 0 .125em rgba(241,70,104,.25)}.select.is-small{border-radius:2px;font-size:.75rem}.select.is-medium{font-size:1.25rem}.select.is-large{font-size:1.5rem}.select.is-disabled::after{border-color:#7a7a7a}.select.is-fullwidth{width:100%}.select.is-fullwidth 
select{width:100%}.select.is-loading::after{margin-top:0;position:absolute;right:.625em;top:.625em;transform:none}.select.is-loading.is-small:after{font-size:.75rem}.select.is-loading.is-medium:after{font-size:1.25rem}.select.is-loading.is-large:after{font-size:1.5rem}.file{align-items:stretch;display:flex;justify-content:flex-start;position:relative}.file.is-white .file-cta{background-color:#fff;border-color:transparent;color:#0a0a0a}.file.is-white.is-hovered .file-cta,.file.is-white:hover .file-cta{background-color:#f9f9f9;border-color:transparent;color:#0a0a0a}.file.is-white.is-focused .file-cta,.file.is-white:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(255,255,255,.25);color:#0a0a0a}.file.is-white.is-active .file-cta,.file.is-white:active .file-cta{background-color:#f2f2f2;border-color:transparent;color:#0a0a0a}.file.is-black .file-cta{background-color:#0a0a0a;border-color:transparent;color:#fff}.file.is-black.is-hovered .file-cta,.file.is-black:hover .file-cta{background-color:#040404;border-color:transparent;color:#fff}.file.is-black.is-focused .file-cta,.file.is-black:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(10,10,10,.25);color:#fff}.file.is-black.is-active .file-cta,.file.is-black:active .file-cta{background-color:#000;border-color:transparent;color:#fff}.file.is-light .file-cta{background-color:#f5f5f5;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-light.is-hovered .file-cta,.file.is-light:hover .file-cta{background-color:#eee;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-light.is-focused .file-cta,.file.is-light:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(245,245,245,.25);color:rgba(0,0,0,.7)}.file.is-light.is-active .file-cta,.file.is-light:active .file-cta{background-color:#e8e8e8;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-dark .file-cta{background-color:#363636;border-color:transparent;color:#fff}.file.is-dark.is-hovered .file-cta,.file.is-dark:hover .file-cta{background-color:#2f2f2f;border-color:transparent;color:#fff}.file.is-dark.is-focused .file-cta,.file.is-dark:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(54,54,54,.25);color:#fff}.file.is-dark.is-active .file-cta,.file.is-dark:active .file-cta{background-color:#292929;border-color:transparent;color:#fff}.file.is-primary .file-cta{background-color:#00d1b2;border-color:transparent;color:#fff}.file.is-primary.is-hovered .file-cta,.file.is-primary:hover .file-cta{background-color:#00c4a7;border-color:transparent;color:#fff}.file.is-primary.is-focused .file-cta,.file.is-primary:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(0,209,178,.25);color:#fff}.file.is-primary.is-active .file-cta,.file.is-primary:active .file-cta{background-color:#00b89c;border-color:transparent;color:#fff}.file.is-link .file-cta{background-color:#3273dc;border-color:transparent;color:#fff}.file.is-link.is-hovered .file-cta,.file.is-link:hover .file-cta{background-color:#276cda;border-color:transparent;color:#fff}.file.is-link.is-focused .file-cta,.file.is-link:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(50,115,220,.25);color:#fff}.file.is-link.is-active .file-cta,.file.is-link:active .file-cta{background-color:#2366d1;border-color:transparent;color:#fff}.file.is-info .file-cta{background-color:#3298dc;border-color:transparent;color:#fff}.file.is-info.is-hovered .file-cta,.file.is-info:hover .file-cta{background-color:#2793da;border-color:transparent;color:#fff}.file.is-info.is-focused 
.file-cta,.file.is-info:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(50,152,220,.25);color:#fff}.file.is-info.is-active .file-cta,.file.is-info:active .file-cta{background-color:#238cd1;border-color:transparent;color:#fff}.file.is-success .file-cta{background-color:#48c774;border-color:transparent;color:#fff}.file.is-success.is-hovered .file-cta,.file.is-success:hover .file-cta{background-color:#3ec46d;border-color:transparent;color:#fff}.file.is-success.is-focused .file-cta,.file.is-success:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(72,199,116,.25);color:#fff}.file.is-success.is-active .file-cta,.file.is-success:active .file-cta{background-color:#3abb67;border-color:transparent;color:#fff}.file.is-warning .file-cta{background-color:#ffdd57;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-warning.is-hovered .file-cta,.file.is-warning:hover .file-cta{background-color:#ffdb4a;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-warning.is-focused .file-cta,.file.is-warning:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(255,221,87,.25);color:rgba(0,0,0,.7)}.file.is-warning.is-active .file-cta,.file.is-warning:active .file-cta{background-color:#ffd83d;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-danger .file-cta{background-color:#f14668;border-color:transparent;color:#fff}.file.is-danger.is-hovered .file-cta,.file.is-danger:hover .file-cta{background-color:#f03a5f;border-color:transparent;color:#fff}.file.is-danger.is-focused .file-cta,.file.is-danger:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(241,70,104,.25);color:#fff}.file.is-danger.is-active .file-cta,.file.is-danger:active .file-cta{background-color:#ef2e55;border-color:transparent;color:#fff}.file.is-small{font-size:.75rem}.file.is-medium{font-size:1.25rem}.file.is-medium .file-icon .fa{font-size:21px}.file.is-large{font-size:1.5rem}.file.is-large .file-icon .fa{font-size:28px}.file.has-name .file-cta{border-bottom-right-radius:0;border-top-right-radius:0}.file.has-name .file-name{border-bottom-left-radius:0;border-top-left-radius:0}.file.has-name.is-empty .file-cta{border-radius:4px}.file.has-name.is-empty .file-name{display:none}.file.is-boxed .file-label{flex-direction:column}.file.is-boxed .file-cta{flex-direction:column;height:auto;padding:1em 3em}.file.is-boxed .file-name{border-width:0 1px 1px}.file.is-boxed .file-icon{height:1.5em;width:1.5em}.file.is-boxed .file-icon .fa{font-size:21px}.file.is-boxed.is-small .file-icon .fa{font-size:14px}.file.is-boxed.is-medium .file-icon .fa{font-size:28px}.file.is-boxed.is-large .file-icon .fa{font-size:35px}.file.is-boxed.has-name .file-cta{border-radius:4px 4px 0 0}.file.is-boxed.has-name .file-name{border-radius:0 0 4px 4px;border-width:0 1px 1px}.file.is-centered{justify-content:center}.file.is-fullwidth .file-label{width:100%}.file.is-fullwidth .file-name{flex-grow:1;max-width:none}.file.is-right{justify-content:flex-end}.file.is-right .file-cta{border-radius:0 4px 4px 0}.file.is-right .file-name{border-radius:4px 0 0 4px;border-width:1px 0 1px 1px;order:-1}.file-label{align-items:stretch;display:flex;cursor:pointer;justify-content:flex-start;overflow:hidden;position:relative}.file-label:hover .file-cta{background-color:#eee;color:#363636}.file-label:hover .file-name{border-color:#d5d5d5}.file-label:active .file-cta{background-color:#e8e8e8;color:#363636}.file-label:active 
.file-name{border-color:#cfcfcf}.file-input{height:100%;left:0;opacity:0;outline:0;position:absolute;top:0;width:100%}.file-cta,.file-name{border-color:#dbdbdb;border-radius:4px;font-size:1em;padding-left:1em;padding-right:1em;white-space:nowrap}.file-cta{background-color:#f5f5f5;color:#4a4a4a}.file-name{border-color:#dbdbdb;border-style:solid;border-width:1px 1px 1px 0;display:block;max-width:16em;overflow:hidden;text-align:inherit;text-overflow:ellipsis}.file-icon{align-items:center;display:flex;height:1em;justify-content:center;margin-right:.5em;width:1em}.file-icon .fa{font-size:14px}.label{color:#363636;display:block;font-size:1rem;font-weight:700}.label:not(:last-child){margin-bottom:.5em}.label.is-small{font-size:.75rem}.label.is-medium{font-size:1.25rem}.label.is-large{font-size:1.5rem}.help{display:block;font-size:.75rem;margin-top:.25rem}.help.is-white{color:#fff}.help.is-black{color:#0a0a0a}.help.is-light{color:#f5f5f5}.help.is-dark{color:#363636}.help.is-primary{color:#00d1b2}.help.is-link{color:#3273dc}.help.is-info{color:#3298dc}.help.is-success{color:#48c774}.help.is-warning{color:#ffdd57}.help.is-danger{color:#f14668}.field:not(:last-child){margin-bottom:.75rem}.field.has-addons{display:flex;justify-content:flex-start}.field.has-addons .control:not(:last-child){margin-right:-1px}.field.has-addons .control:not(:first-child):not(:last-child) .button,.field.has-addons .control:not(:first-child):not(:last-child) .input,.field.has-addons .control:not(:first-child):not(:last-child) .select select{border-radius:0}.field.has-addons .control:first-child:not(:only-child) .button,.field.has-addons .control:first-child:not(:only-child) .input,.field.has-addons .control:first-child:not(:only-child) .select select{border-bottom-right-radius:0;border-top-right-radius:0}.field.has-addons .control:last-child:not(:only-child) .button,.field.has-addons .control:last-child:not(:only-child) .input,.field.has-addons .control:last-child:not(:only-child) .select select{border-bottom-left-radius:0;border-top-left-radius:0}.field.has-addons .control .button:not([disabled]).is-hovered,.field.has-addons .control .button:not([disabled]):hover,.field.has-addons .control .input:not([disabled]).is-hovered,.field.has-addons .control .input:not([disabled]):hover,.field.has-addons .control .select select:not([disabled]).is-hovered,.field.has-addons .control .select select:not([disabled]):hover{z-index:2}.field.has-addons .control .button:not([disabled]).is-active,.field.has-addons .control .button:not([disabled]).is-focused,.field.has-addons .control .button:not([disabled]):active,.field.has-addons .control .button:not([disabled]):focus,.field.has-addons .control .input:not([disabled]).is-active,.field.has-addons .control .input:not([disabled]).is-focused,.field.has-addons .control .input:not([disabled]):active,.field.has-addons .control .input:not([disabled]):focus,.field.has-addons .control .select select:not([disabled]).is-active,.field.has-addons .control .select select:not([disabled]).is-focused,.field.has-addons .control .select select:not([disabled]):active,.field.has-addons .control .select select:not([disabled]):focus{z-index:3}.field.has-addons .control .button:not([disabled]).is-active:hover,.field.has-addons .control .button:not([disabled]).is-focused:hover,.field.has-addons .control .button:not([disabled]):active:hover,.field.has-addons .control .button:not([disabled]):focus:hover,.field.has-addons .control .input:not([disabled]).is-active:hover,.field.has-addons .control 
.input:not([disabled]).is-focused:hover,.field.has-addons .control .input:not([disabled]):active:hover,.field.has-addons .control .input:not([disabled]):focus:hover,.field.has-addons .control .select select:not([disabled]).is-active:hover,.field.has-addons .control .select select:not([disabled]).is-focused:hover,.field.has-addons .control .select select:not([disabled]):active:hover,.field.has-addons .control .select select:not([disabled]):focus:hover{z-index:4}.field.has-addons .control.is-expanded{flex-grow:1;flex-shrink:1}.field.has-addons.has-addons-centered{justify-content:center}.field.has-addons.has-addons-right{justify-content:flex-end}.field.has-addons.has-addons-fullwidth .control{flex-grow:1;flex-shrink:0}.field.is-grouped{display:flex;justify-content:flex-start}.field.is-grouped>.control{flex-shrink:0}.field.is-grouped>.control:not(:last-child){margin-bottom:0;margin-right:.75rem}.field.is-grouped>.control.is-expanded{flex-grow:1;flex-shrink:1}.field.is-grouped.is-grouped-centered{justify-content:center}.field.is-grouped.is-grouped-right{justify-content:flex-end}.field.is-grouped.is-grouped-multiline{flex-wrap:wrap}.field.is-grouped.is-grouped-multiline>.control:last-child,.field.is-grouped.is-grouped-multiline>.control:not(:last-child){margin-bottom:.75rem}.field.is-grouped.is-grouped-multiline:last-child{margin-bottom:-.75rem}.field.is-grouped.is-grouped-multiline:not(:last-child){margin-bottom:0}@media screen and (min-width:769px),print{.field.is-horizontal{display:flex}}.field-label .label{font-size:inherit}@media screen and (max-width:768px){.field-label{margin-bottom:.5rem}}@media screen and (min-width:769px),print{.field-label{flex-basis:0;flex-grow:1;flex-shrink:0;margin-right:1.5rem;text-align:right}.field-label.is-small{font-size:.75rem;padding-top:.375em}.field-label.is-normal{padding-top:.375em}.field-label.is-medium{font-size:1.25rem;padding-top:.375em}.field-label.is-large{font-size:1.5rem;padding-top:.375em}}.field-body .field .field{margin-bottom:0}@media screen and (min-width:769px),print{.field-body{display:flex;flex-basis:0;flex-grow:5;flex-shrink:1}.field-body .field{margin-bottom:0}.field-body>.field{flex-shrink:1}.field-body>.field:not(.is-narrow){flex-grow:1}.field-body>.field:not(:last-child){margin-right:.75rem}}.control{box-sizing:border-box;clear:both;font-size:1rem;position:relative;text-align:inherit}.control.has-icons-left .input:focus~.icon,.control.has-icons-left .select:focus~.icon,.control.has-icons-right .input:focus~.icon,.control.has-icons-right .select:focus~.icon{color:#4a4a4a}.control.has-icons-left .input.is-small~.icon,.control.has-icons-left .select.is-small~.icon,.control.has-icons-right .input.is-small~.icon,.control.has-icons-right .select.is-small~.icon{font-size:.75rem}.control.has-icons-left .input.is-medium~.icon,.control.has-icons-left .select.is-medium~.icon,.control.has-icons-right .input.is-medium~.icon,.control.has-icons-right .select.is-medium~.icon{font-size:1.25rem}.control.has-icons-left .input.is-large~.icon,.control.has-icons-left .select.is-large~.icon,.control.has-icons-right .input.is-large~.icon,.control.has-icons-right .select.is-large~.icon{font-size:1.5rem}.control.has-icons-left .icon,.control.has-icons-right .icon{color:#dbdbdb;height:2.5em;pointer-events:none;position:absolute;top:0;width:2.5em;z-index:4}.control.has-icons-left .input,.control.has-icons-left .select select{padding-left:2.5em}.control.has-icons-left .icon.is-left{left:0}.control.has-icons-right .input,.control.has-icons-right .select 
select{padding-right:2.5em}.control.has-icons-right .icon.is-right{right:0}.control.is-loading::after{position:absolute!important;right:.625em;top:.625em;z-index:4}.control.is-loading.is-small:after{font-size:.75rem}.control.is-loading.is-medium:after{font-size:1.25rem}.control.is-loading.is-large:after{font-size:1.5rem}.breadcrumb{font-size:1rem;white-space:nowrap}.breadcrumb a{align-items:center;color:#3273dc;display:flex;justify-content:center;padding:0 .75em}.breadcrumb a:hover{color:#363636}.breadcrumb li{align-items:center;display:flex}.breadcrumb li:first-child a{padding-left:0}.breadcrumb li.is-active a{color:#363636;cursor:default;pointer-events:none}.breadcrumb li+li::before{color:#b5b5b5;content:"\0002f"}.breadcrumb ol,.breadcrumb ul{align-items:flex-start;display:flex;flex-wrap:wrap;justify-content:flex-start}.breadcrumb .icon:first-child{margin-right:.5em}.breadcrumb .icon:last-child{margin-left:.5em}.breadcrumb.is-centered ol,.breadcrumb.is-centered ul{justify-content:center}.breadcrumb.is-right ol,.breadcrumb.is-right ul{justify-content:flex-end}.breadcrumb.is-small{font-size:.75rem}.breadcrumb.is-medium{font-size:1.25rem}.breadcrumb.is-large{font-size:1.5rem}.breadcrumb.has-arrow-separator li+li::before{content:"\02192"}.breadcrumb.has-bullet-separator li+li::before{content:"\02022"}.breadcrumb.has-dot-separator li+li::before{content:"\000b7"}.breadcrumb.has-succeeds-separator li+li::before{content:"\0227B"}.card{background-color:#fff;border-radius:.25rem;box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 0 0 1px rgba(10,10,10,.02);color:#4a4a4a;max-width:100%;overflow:hidden;position:relative}.card-header{background-color:transparent;align-items:stretch;box-shadow:0 .125em .25em rgba(10,10,10,.1);display:flex}.card-header-title{align-items:center;color:#363636;display:flex;flex-grow:1;font-weight:700;padding:.75rem 1rem}.card-header-title.is-centered{justify-content:center}.card-header-icon{align-items:center;cursor:pointer;display:flex;justify-content:center;padding:.75rem 1rem}.card-image{display:block;position:relative}.card-content{background-color:transparent;padding:1.5rem}.card-footer{background-color:transparent;border-top:1px solid #ededed;align-items:stretch;display:flex}.card-footer-item{align-items:center;display:flex;flex-basis:0;flex-grow:1;flex-shrink:0;justify-content:center;padding:.75rem}.card-footer-item:not(:last-child){border-right:1px solid #ededed}.card .media:not(:last-child){margin-bottom:1.5rem}.dropdown{display:inline-flex;position:relative;vertical-align:top}.dropdown.is-active .dropdown-menu,.dropdown.is-hoverable:hover .dropdown-menu{display:block}.dropdown.is-right .dropdown-menu{left:auto;right:0}.dropdown.is-up .dropdown-menu{bottom:100%;padding-bottom:4px;padding-top:initial;top:auto}.dropdown-menu{display:none;left:0;min-width:12rem;padding-top:4px;position:absolute;top:100%;z-index:20}.dropdown-content{background-color:#fff;border-radius:4px;box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 0 0 1px rgba(10,10,10,.02);padding-bottom:.5rem;padding-top:.5rem}.dropdown-item{color:#4a4a4a;display:block;font-size:.875rem;line-height:1.5;padding:.375rem 
1rem;position:relative}a.dropdown-item,button.dropdown-item{padding-right:3rem;text-align:inherit;white-space:nowrap;width:100%}a.dropdown-item:hover,button.dropdown-item:hover{background-color:#f5f5f5;color:#0a0a0a}a.dropdown-item.is-active,button.dropdown-item.is-active{background-color:#3273dc;color:#fff}.dropdown-divider{background-color:#ededed;border:none;display:block;height:1px;margin:.5rem 0}.level{align-items:center;justify-content:space-between}.level code{border-radius:4px}.level img{display:inline-block;vertical-align:top}.level.is-mobile{display:flex}.level.is-mobile .level-left,.level.is-mobile .level-right{display:flex}.level.is-mobile .level-left+.level-right{margin-top:0}.level.is-mobile .level-item:not(:last-child){margin-bottom:0;margin-right:.75rem}.level.is-mobile .level-item:not(.is-narrow){flex-grow:1}@media screen and (min-width:769px),print{.level{display:flex}.level>.level-item:not(.is-narrow){flex-grow:1}}.level-item{align-items:center;display:flex;flex-basis:auto;flex-grow:0;flex-shrink:0;justify-content:center}.level-item .subtitle,.level-item .title{margin-bottom:0}@media screen and (max-width:768px){.level-item:not(:last-child){margin-bottom:.75rem}}.level-left,.level-right{flex-basis:auto;flex-grow:0;flex-shrink:0}.level-left .level-item.is-flexible,.level-right .level-item.is-flexible{flex-grow:1}@media screen and (min-width:769px),print{.level-left .level-item:not(:last-child),.level-right .level-item:not(:last-child){margin-right:.75rem}}.level-left{align-items:center;justify-content:flex-start}@media screen and (max-width:768px){.level-left+.level-right{margin-top:1.5rem}}@media screen and (min-width:769px),print{.level-left{display:flex}}.level-right{align-items:center;justify-content:flex-end}@media screen and (min-width:769px),print{.level-right{display:flex}}.media{align-items:flex-start;display:flex;text-align:inherit}.media .content:not(:last-child){margin-bottom:.75rem}.media .media{border-top:1px solid rgba(219,219,219,.5);display:flex;padding-top:.75rem}.media .media .content:not(:last-child),.media .media .control:not(:last-child){margin-bottom:.5rem}.media .media .media{padding-top:.5rem}.media .media .media+.media{margin-top:.5rem}.media+.media{border-top:1px solid rgba(219,219,219,.5);margin-top:1rem;padding-top:1rem}.media.is-large+.media{margin-top:1.5rem;padding-top:1.5rem}.media-left,.media-right{flex-basis:auto;flex-grow:0;flex-shrink:0}.media-left{margin-right:1rem}.media-right{margin-left:1rem}.media-content{flex-basis:auto;flex-grow:1;flex-shrink:1;text-align:inherit}@media screen and (max-width:768px){.media-content{overflow-x:auto}}.menu{font-size:1rem}.menu.is-small{font-size:.75rem}.menu.is-medium{font-size:1.25rem}.menu.is-large{font-size:1.5rem}.menu-list{line-height:1.25}.menu-list a{border-radius:2px;color:#4a4a4a;display:block;padding:.5em .75em}.menu-list a:hover{background-color:#f5f5f5;color:#363636}.menu-list a.is-active{background-color:#3273dc;color:#fff}.menu-list li ul{border-left:1px solid #dbdbdb;margin:.75em;padding-left:.75em}.menu-label{color:#7a7a7a;font-size:.75em;letter-spacing:.1em;text-transform:uppercase}.menu-label:not(:first-child){margin-top:1em}.menu-label:not(:last-child){margin-bottom:1em}.message{background-color:#f5f5f5;border-radius:4px;font-size:1rem}.message strong{color:currentColor}.message 
a:not(.button):not(.tag):not(.dropdown-item){color:currentColor;text-decoration:underline}.message.is-small{font-size:.75rem}.message.is-medium{font-size:1.25rem}.message.is-large{font-size:1.5rem}.message.is-white{background-color:#fff}.message.is-white .message-header{background-color:#fff;color:#0a0a0a}.message.is-white .message-body{border-color:#fff}.message.is-black{background-color:#fafafa}.message.is-black .message-header{background-color:#0a0a0a;color:#fff}.message.is-black .message-body{border-color:#0a0a0a}.message.is-light{background-color:#fafafa}.message.is-light .message-header{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.message.is-light .message-body{border-color:#f5f5f5}.message.is-dark{background-color:#fafafa}.message.is-dark .message-header{background-color:#363636;color:#fff}.message.is-dark .message-body{border-color:#363636}.message.is-primary{background-color:#ebfffc}.message.is-primary .message-header{background-color:#00d1b2;color:#fff}.message.is-primary .message-body{border-color:#00d1b2;color:#00947e}.message.is-link{background-color:#eef3fc}.message.is-link .message-header{background-color:#3273dc;color:#fff}.message.is-link .message-body{border-color:#3273dc;color:#2160c4}.message.is-info{background-color:#eef6fc}.message.is-info .message-header{background-color:#3298dc;color:#fff}.message.is-info .message-body{border-color:#3298dc;color:#1d72aa}.message.is-success{background-color:#effaf3}.message.is-success .message-header{background-color:#48c774;color:#fff}.message.is-success .message-body{border-color:#48c774;color:#257942}.message.is-warning{background-color:#fffbeb}.message.is-warning .message-header{background-color:#ffdd57;color:rgba(0,0,0,.7)}.message.is-warning .message-body{border-color:#ffdd57;color:#947600}.message.is-danger{background-color:#feecf0}.message.is-danger .message-header{background-color:#f14668;color:#fff}.message.is-danger .message-body{border-color:#f14668;color:#cc0f35}.message-header{align-items:center;background-color:#4a4a4a;border-radius:4px 4px 0 0;color:#fff;display:flex;font-weight:700;justify-content:space-between;line-height:1.25;padding:.75em 1em;position:relative}.message-header .delete{flex-grow:0;flex-shrink:0;margin-left:.75em}.message-header+.message-body{border-width:0;border-top-left-radius:0;border-top-right-radius:0}.message-body{border-color:#dbdbdb;border-radius:4px;border-style:solid;border-width:0 0 0 4px;color:#4a4a4a;padding:1.25em 1.5em}.message-body code,.message-body pre{background-color:#fff}.message-body pre code{background-color:transparent}.modal{align-items:center;display:none;flex-direction:column;justify-content:center;overflow:hidden;position:fixed;z-index:40}.modal.is-active{display:flex}.modal-background{background-color:rgba(10,10,10,.86)}.modal-card,.modal-content{margin:0 20px;max-height:calc(100vh - 160px);overflow:auto;position:relative;width:100%}@media screen and (min-width:769px){.modal-card,.modal-content{margin:0 auto;max-height:calc(100vh - 40px);width:640px}}.modal-close{background:0 0;height:40px;position:fixed;right:20px;top:20px;width:40px}.modal-card{display:flex;flex-direction:column;max-height:calc(100vh - 40px);overflow:hidden;-ms-overflow-y:visible}.modal-card-foot,.modal-card-head{align-items:center;background-color:#f5f5f5;display:flex;flex-shrink:0;justify-content:flex-start;padding:20px;position:relative}.modal-card-head{border-bottom:1px solid 
#dbdbdb;border-top-left-radius:6px;border-top-right-radius:6px}.modal-card-title{color:#363636;flex-grow:1;flex-shrink:0;font-size:1.5rem;line-height:1}.modal-card-foot{border-bottom-left-radius:6px;border-bottom-right-radius:6px;border-top:1px solid #dbdbdb}.modal-card-foot .button:not(:last-child){margin-right:.5em}.modal-card-body{-webkit-overflow-scrolling:touch;background-color:#fff;flex-grow:1;flex-shrink:1;overflow:auto;padding:20px}.navbar{background-color:#fff;min-height:3.25rem;position:relative;z-index:30}.navbar.is-white{background-color:#fff;color:#0a0a0a}.navbar.is-white .navbar-brand .navbar-link,.navbar.is-white .navbar-brand>.navbar-item{color:#0a0a0a}.navbar.is-white .navbar-brand .navbar-link.is-active,.navbar.is-white .navbar-brand .navbar-link:focus,.navbar.is-white .navbar-brand .navbar-link:hover,.navbar.is-white .navbar-brand>a.navbar-item.is-active,.navbar.is-white .navbar-brand>a.navbar-item:focus,.navbar.is-white .navbar-brand>a.navbar-item:hover{background-color:#f2f2f2;color:#0a0a0a}.navbar.is-white .navbar-brand .navbar-link::after{border-color:#0a0a0a}.navbar.is-white .navbar-burger{color:#0a0a0a}@media screen and (min-width:1024px){.navbar.is-white .navbar-end .navbar-link,.navbar.is-white .navbar-end>.navbar-item,.navbar.is-white .navbar-start .navbar-link,.navbar.is-white .navbar-start>.navbar-item{color:#0a0a0a}.navbar.is-white .navbar-end .navbar-link.is-active,.navbar.is-white .navbar-end .navbar-link:focus,.navbar.is-white .navbar-end .navbar-link:hover,.navbar.is-white .navbar-end>a.navbar-item.is-active,.navbar.is-white .navbar-end>a.navbar-item:focus,.navbar.is-white .navbar-end>a.navbar-item:hover,.navbar.is-white .navbar-start .navbar-link.is-active,.navbar.is-white .navbar-start .navbar-link:focus,.navbar.is-white .navbar-start .navbar-link:hover,.navbar.is-white .navbar-start>a.navbar-item.is-active,.navbar.is-white .navbar-start>a.navbar-item:focus,.navbar.is-white .navbar-start>a.navbar-item:hover{background-color:#f2f2f2;color:#0a0a0a}.navbar.is-white .navbar-end .navbar-link::after,.navbar.is-white .navbar-start .navbar-link::after{border-color:#0a0a0a}.navbar.is-white .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-white .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-white .navbar-item.has-dropdown:hover .navbar-link{background-color:#f2f2f2;color:#0a0a0a}.navbar.is-white .navbar-dropdown a.navbar-item.is-active{background-color:#fff;color:#0a0a0a}}.navbar.is-black{background-color:#0a0a0a;color:#fff}.navbar.is-black .navbar-brand .navbar-link,.navbar.is-black .navbar-brand>.navbar-item{color:#fff}.navbar.is-black .navbar-brand .navbar-link.is-active,.navbar.is-black .navbar-brand .navbar-link:focus,.navbar.is-black .navbar-brand .navbar-link:hover,.navbar.is-black .navbar-brand>a.navbar-item.is-active,.navbar.is-black .navbar-brand>a.navbar-item:focus,.navbar.is-black .navbar-brand>a.navbar-item:hover{background-color:#000;color:#fff}.navbar.is-black .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-black .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-black .navbar-end .navbar-link,.navbar.is-black .navbar-end>.navbar-item,.navbar.is-black .navbar-start .navbar-link,.navbar.is-black .navbar-start>.navbar-item{color:#fff}.navbar.is-black .navbar-end .navbar-link.is-active,.navbar.is-black .navbar-end .navbar-link:focus,.navbar.is-black .navbar-end .navbar-link:hover,.navbar.is-black .navbar-end>a.navbar-item.is-active,.navbar.is-black 
.navbar-end>a.navbar-item:focus,.navbar.is-black .navbar-end>a.navbar-item:hover,.navbar.is-black .navbar-start .navbar-link.is-active,.navbar.is-black .navbar-start .navbar-link:focus,.navbar.is-black .navbar-start .navbar-link:hover,.navbar.is-black .navbar-start>a.navbar-item.is-active,.navbar.is-black .navbar-start>a.navbar-item:focus,.navbar.is-black .navbar-start>a.navbar-item:hover{background-color:#000;color:#fff}.navbar.is-black .navbar-end .navbar-link::after,.navbar.is-black .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-black .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-black .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-black .navbar-item.has-dropdown:hover .navbar-link{background-color:#000;color:#fff}.navbar.is-black .navbar-dropdown a.navbar-item.is-active{background-color:#0a0a0a;color:#fff}}.navbar.is-light{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.navbar.is-light .navbar-brand .navbar-link,.navbar.is-light .navbar-brand>.navbar-item{color:rgba(0,0,0,.7)}.navbar.is-light .navbar-brand .navbar-link.is-active,.navbar.is-light .navbar-brand .navbar-link:focus,.navbar.is-light .navbar-brand .navbar-link:hover,.navbar.is-light .navbar-brand>a.navbar-item.is-active,.navbar.is-light .navbar-brand>a.navbar-item:focus,.navbar.is-light .navbar-brand>a.navbar-item:hover{background-color:#e8e8e8;color:rgba(0,0,0,.7)}.navbar.is-light .navbar-brand .navbar-link::after{border-color:rgba(0,0,0,.7)}.navbar.is-light .navbar-burger{color:rgba(0,0,0,.7)}@media screen and (min-width:1024px){.navbar.is-light .navbar-end .navbar-link,.navbar.is-light .navbar-end>.navbar-item,.navbar.is-light .navbar-start .navbar-link,.navbar.is-light .navbar-start>.navbar-item{color:rgba(0,0,0,.7)}.navbar.is-light .navbar-end .navbar-link.is-active,.navbar.is-light .navbar-end .navbar-link:focus,.navbar.is-light .navbar-end .navbar-link:hover,.navbar.is-light .navbar-end>a.navbar-item.is-active,.navbar.is-light .navbar-end>a.navbar-item:focus,.navbar.is-light .navbar-end>a.navbar-item:hover,.navbar.is-light .navbar-start .navbar-link.is-active,.navbar.is-light .navbar-start .navbar-link:focus,.navbar.is-light .navbar-start .navbar-link:hover,.navbar.is-light .navbar-start>a.navbar-item.is-active,.navbar.is-light .navbar-start>a.navbar-item:focus,.navbar.is-light .navbar-start>a.navbar-item:hover{background-color:#e8e8e8;color:rgba(0,0,0,.7)}.navbar.is-light .navbar-end .navbar-link::after,.navbar.is-light .navbar-start .navbar-link::after{border-color:rgba(0,0,0,.7)}.navbar.is-light .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-light .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-light .navbar-item.has-dropdown:hover .navbar-link{background-color:#e8e8e8;color:rgba(0,0,0,.7)}.navbar.is-light .navbar-dropdown a.navbar-item.is-active{background-color:#f5f5f5;color:rgba(0,0,0,.7)}}.navbar.is-dark{background-color:#363636;color:#fff}.navbar.is-dark .navbar-brand .navbar-link,.navbar.is-dark .navbar-brand>.navbar-item{color:#fff}.navbar.is-dark .navbar-brand .navbar-link.is-active,.navbar.is-dark .navbar-brand .navbar-link:focus,.navbar.is-dark .navbar-brand .navbar-link:hover,.navbar.is-dark .navbar-brand>a.navbar-item.is-active,.navbar.is-dark .navbar-brand>a.navbar-item:focus,.navbar.is-dark .navbar-brand>a.navbar-item:hover{background-color:#292929;color:#fff}.navbar.is-dark .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-dark .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-dark .navbar-end 
.navbar-link,.navbar.is-dark .navbar-end>.navbar-item,.navbar.is-dark .navbar-start .navbar-link,.navbar.is-dark .navbar-start>.navbar-item{color:#fff}.navbar.is-dark .navbar-end .navbar-link.is-active,.navbar.is-dark .navbar-end .navbar-link:focus,.navbar.is-dark .navbar-end .navbar-link:hover,.navbar.is-dark .navbar-end>a.navbar-item.is-active,.navbar.is-dark .navbar-end>a.navbar-item:focus,.navbar.is-dark .navbar-end>a.navbar-item:hover,.navbar.is-dark .navbar-start .navbar-link.is-active,.navbar.is-dark .navbar-start .navbar-link:focus,.navbar.is-dark .navbar-start .navbar-link:hover,.navbar.is-dark .navbar-start>a.navbar-item.is-active,.navbar.is-dark .navbar-start>a.navbar-item:focus,.navbar.is-dark .navbar-start>a.navbar-item:hover{background-color:#292929;color:#fff}.navbar.is-dark .navbar-end .navbar-link::after,.navbar.is-dark .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-dark .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-dark .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-dark .navbar-item.has-dropdown:hover .navbar-link{background-color:#292929;color:#fff}.navbar.is-dark .navbar-dropdown a.navbar-item.is-active{background-color:#363636;color:#fff}}.navbar.is-primary{background-color:#00d1b2;color:#fff}.navbar.is-primary .navbar-brand .navbar-link,.navbar.is-primary .navbar-brand>.navbar-item{color:#fff}.navbar.is-primary .navbar-brand .navbar-link.is-active,.navbar.is-primary .navbar-brand .navbar-link:focus,.navbar.is-primary .navbar-brand .navbar-link:hover,.navbar.is-primary .navbar-brand>a.navbar-item.is-active,.navbar.is-primary .navbar-brand>a.navbar-item:focus,.navbar.is-primary .navbar-brand>a.navbar-item:hover{background-color:#00b89c;color:#fff}.navbar.is-primary .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-primary .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-primary .navbar-end .navbar-link,.navbar.is-primary .navbar-end>.navbar-item,.navbar.is-primary .navbar-start .navbar-link,.navbar.is-primary .navbar-start>.navbar-item{color:#fff}.navbar.is-primary .navbar-end .navbar-link.is-active,.navbar.is-primary .navbar-end .navbar-link:focus,.navbar.is-primary .navbar-end .navbar-link:hover,.navbar.is-primary .navbar-end>a.navbar-item.is-active,.navbar.is-primary .navbar-end>a.navbar-item:focus,.navbar.is-primary .navbar-end>a.navbar-item:hover,.navbar.is-primary .navbar-start .navbar-link.is-active,.navbar.is-primary .navbar-start .navbar-link:focus,.navbar.is-primary .navbar-start .navbar-link:hover,.navbar.is-primary .navbar-start>a.navbar-item.is-active,.navbar.is-primary .navbar-start>a.navbar-item:focus,.navbar.is-primary .navbar-start>a.navbar-item:hover{background-color:#00b89c;color:#fff}.navbar.is-primary .navbar-end .navbar-link::after,.navbar.is-primary .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-primary .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-primary .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-primary .navbar-item.has-dropdown:hover .navbar-link{background-color:#00b89c;color:#fff}.navbar.is-primary .navbar-dropdown a.navbar-item.is-active{background-color:#00d1b2;color:#fff}}.navbar.is-link{background-color:#3273dc;color:#fff}.navbar.is-link .navbar-brand .navbar-link,.navbar.is-link .navbar-brand>.navbar-item{color:#fff}.navbar.is-link .navbar-brand .navbar-link.is-active,.navbar.is-link .navbar-brand .navbar-link:focus,.navbar.is-link .navbar-brand .navbar-link:hover,.navbar.is-link 
.navbar-brand>a.navbar-item.is-active,.navbar.is-link .navbar-brand>a.navbar-item:focus,.navbar.is-link .navbar-brand>a.navbar-item:hover{background-color:#2366d1;color:#fff}.navbar.is-link .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-link .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-link .navbar-end .navbar-link,.navbar.is-link .navbar-end>.navbar-item,.navbar.is-link .navbar-start .navbar-link,.navbar.is-link .navbar-start>.navbar-item{color:#fff}.navbar.is-link .navbar-end .navbar-link.is-active,.navbar.is-link .navbar-end .navbar-link:focus,.navbar.is-link .navbar-end .navbar-link:hover,.navbar.is-link .navbar-end>a.navbar-item.is-active,.navbar.is-link .navbar-end>a.navbar-item:focus,.navbar.is-link .navbar-end>a.navbar-item:hover,.navbar.is-link .navbar-start .navbar-link.is-active,.navbar.is-link .navbar-start .navbar-link:focus,.navbar.is-link .navbar-start .navbar-link:hover,.navbar.is-link .navbar-start>a.navbar-item.is-active,.navbar.is-link .navbar-start>a.navbar-item:focus,.navbar.is-link .navbar-start>a.navbar-item:hover{background-color:#2366d1;color:#fff}.navbar.is-link .navbar-end .navbar-link::after,.navbar.is-link .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-link .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-link .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-link .navbar-item.has-dropdown:hover .navbar-link{background-color:#2366d1;color:#fff}.navbar.is-link .navbar-dropdown a.navbar-item.is-active{background-color:#3273dc;color:#fff}}.navbar.is-info{background-color:#3298dc;color:#fff}.navbar.is-info .navbar-brand .navbar-link,.navbar.is-info .navbar-brand>.navbar-item{color:#fff}.navbar.is-info .navbar-brand .navbar-link.is-active,.navbar.is-info .navbar-brand .navbar-link:focus,.navbar.is-info .navbar-brand .navbar-link:hover,.navbar.is-info .navbar-brand>a.navbar-item.is-active,.navbar.is-info .navbar-brand>a.navbar-item:focus,.navbar.is-info .navbar-brand>a.navbar-item:hover{background-color:#238cd1;color:#fff}.navbar.is-info .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-info .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-info .navbar-end .navbar-link,.navbar.is-info .navbar-end>.navbar-item,.navbar.is-info .navbar-start .navbar-link,.navbar.is-info .navbar-start>.navbar-item{color:#fff}.navbar.is-info .navbar-end .navbar-link.is-active,.navbar.is-info .navbar-end .navbar-link:focus,.navbar.is-info .navbar-end .navbar-link:hover,.navbar.is-info .navbar-end>a.navbar-item.is-active,.navbar.is-info .navbar-end>a.navbar-item:focus,.navbar.is-info .navbar-end>a.navbar-item:hover,.navbar.is-info .navbar-start .navbar-link.is-active,.navbar.is-info .navbar-start .navbar-link:focus,.navbar.is-info .navbar-start .navbar-link:hover,.navbar.is-info .navbar-start>a.navbar-item.is-active,.navbar.is-info .navbar-start>a.navbar-item:focus,.navbar.is-info .navbar-start>a.navbar-item:hover{background-color:#238cd1;color:#fff}.navbar.is-info .navbar-end .navbar-link::after,.navbar.is-info .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-info .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-info .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-info .navbar-item.has-dropdown:hover .navbar-link{background-color:#238cd1;color:#fff}.navbar.is-info .navbar-dropdown a.navbar-item.is-active{background-color:#3298dc;color:#fff}}.navbar.is-success{background-color:#48c774;color:#fff}.navbar.is-success .navbar-brand 
.navbar-link,.navbar.is-success .navbar-brand>.navbar-item{color:#fff}.navbar.is-success .navbar-brand .navbar-link.is-active,.navbar.is-success .navbar-brand .navbar-link:focus,.navbar.is-success .navbar-brand .navbar-link:hover,.navbar.is-success .navbar-brand>a.navbar-item.is-active,.navbar.is-success .navbar-brand>a.navbar-item:focus,.navbar.is-success .navbar-brand>a.navbar-item:hover{background-color:#3abb67;color:#fff}.navbar.is-success .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-success .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-success .navbar-end .navbar-link,.navbar.is-success .navbar-end>.navbar-item,.navbar.is-success .navbar-start .navbar-link,.navbar.is-success .navbar-start>.navbar-item{color:#fff}.navbar.is-success .navbar-end .navbar-link.is-active,.navbar.is-success .navbar-end .navbar-link:focus,.navbar.is-success .navbar-end .navbar-link:hover,.navbar.is-success .navbar-end>a.navbar-item.is-active,.navbar.is-success .navbar-end>a.navbar-item:focus,.navbar.is-success .navbar-end>a.navbar-item:hover,.navbar.is-success .navbar-start .navbar-link.is-active,.navbar.is-success .navbar-start .navbar-link:focus,.navbar.is-success .navbar-start .navbar-link:hover,.navbar.is-success .navbar-start>a.navbar-item.is-active,.navbar.is-success .navbar-start>a.navbar-item:focus,.navbar.is-success .navbar-start>a.navbar-item:hover{background-color:#3abb67;color:#fff}.navbar.is-success .navbar-end .navbar-link::after,.navbar.is-success .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-success .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-success .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-success .navbar-item.has-dropdown:hover .navbar-link{background-color:#3abb67;color:#fff}.navbar.is-success .navbar-dropdown a.navbar-item.is-active{background-color:#48c774;color:#fff}}.navbar.is-warning{background-color:#ffdd57;color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-brand .navbar-link,.navbar.is-warning .navbar-brand>.navbar-item{color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-brand .navbar-link.is-active,.navbar.is-warning .navbar-brand .navbar-link:focus,.navbar.is-warning .navbar-brand .navbar-link:hover,.navbar.is-warning .navbar-brand>a.navbar-item.is-active,.navbar.is-warning .navbar-brand>a.navbar-item:focus,.navbar.is-warning .navbar-brand>a.navbar-item:hover{background-color:#ffd83d;color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-brand .navbar-link::after{border-color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-burger{color:rgba(0,0,0,.7)}@media screen and (min-width:1024px){.navbar.is-warning .navbar-end .navbar-link,.navbar.is-warning .navbar-end>.navbar-item,.navbar.is-warning .navbar-start .navbar-link,.navbar.is-warning .navbar-start>.navbar-item{color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-end .navbar-link.is-active,.navbar.is-warning .navbar-end .navbar-link:focus,.navbar.is-warning .navbar-end .navbar-link:hover,.navbar.is-warning .navbar-end>a.navbar-item.is-active,.navbar.is-warning .navbar-end>a.navbar-item:focus,.navbar.is-warning .navbar-end>a.navbar-item:hover,.navbar.is-warning .navbar-start .navbar-link.is-active,.navbar.is-warning .navbar-start .navbar-link:focus,.navbar.is-warning .navbar-start .navbar-link:hover,.navbar.is-warning .navbar-start>a.navbar-item.is-active,.navbar.is-warning .navbar-start>a.navbar-item:focus,.navbar.is-warning .navbar-start>a.navbar-item:hover{background-color:#ffd83d;color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-end 
.navbar-link::after,.navbar.is-warning .navbar-start .navbar-link::after{border-color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-warning .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-warning .navbar-item.has-dropdown:hover .navbar-link{background-color:#ffd83d;color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-dropdown a.navbar-item.is-active{background-color:#ffdd57;color:rgba(0,0,0,.7)}}.navbar.is-danger{background-color:#f14668;color:#fff}.navbar.is-danger .navbar-brand .navbar-link,.navbar.is-danger .navbar-brand>.navbar-item{color:#fff}.navbar.is-danger .navbar-brand .navbar-link.is-active,.navbar.is-danger .navbar-brand .navbar-link:focus,.navbar.is-danger .navbar-brand .navbar-link:hover,.navbar.is-danger .navbar-brand>a.navbar-item.is-active,.navbar.is-danger .navbar-brand>a.navbar-item:focus,.navbar.is-danger .navbar-brand>a.navbar-item:hover{background-color:#ef2e55;color:#fff}.navbar.is-danger .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-danger .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-danger .navbar-end .navbar-link,.navbar.is-danger .navbar-end>.navbar-item,.navbar.is-danger .navbar-start .navbar-link,.navbar.is-danger .navbar-start>.navbar-item{color:#fff}.navbar.is-danger .navbar-end .navbar-link.is-active,.navbar.is-danger .navbar-end .navbar-link:focus,.navbar.is-danger .navbar-end .navbar-link:hover,.navbar.is-danger .navbar-end>a.navbar-item.is-active,.navbar.is-danger .navbar-end>a.navbar-item:focus,.navbar.is-danger .navbar-end>a.navbar-item:hover,.navbar.is-danger .navbar-start .navbar-link.is-active,.navbar.is-danger .navbar-start .navbar-link:focus,.navbar.is-danger .navbar-start .navbar-link:hover,.navbar.is-danger .navbar-start>a.navbar-item.is-active,.navbar.is-danger .navbar-start>a.navbar-item:focus,.navbar.is-danger .navbar-start>a.navbar-item:hover{background-color:#ef2e55;color:#fff}.navbar.is-danger .navbar-end .navbar-link::after,.navbar.is-danger .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-danger .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-danger .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-danger .navbar-item.has-dropdown:hover .navbar-link{background-color:#ef2e55;color:#fff}.navbar.is-danger .navbar-dropdown a.navbar-item.is-active{background-color:#f14668;color:#fff}}.navbar>.container{align-items:stretch;display:flex;min-height:3.25rem;width:100%}.navbar.has-shadow{box-shadow:0 2px 0 0 #f5f5f5}.navbar.is-fixed-bottom,.navbar.is-fixed-top{left:0;position:fixed;right:0;z-index:30}.navbar.is-fixed-bottom{bottom:0}.navbar.is-fixed-bottom.has-shadow{box-shadow:0 -2px 0 0 #f5f5f5}.navbar.is-fixed-top{top:0}body.has-navbar-fixed-top,html.has-navbar-fixed-top{padding-top:3.25rem}body.has-navbar-fixed-bottom,html.has-navbar-fixed-bottom{padding-bottom:3.25rem}.navbar-brand,.navbar-tabs{align-items:stretch;display:flex;flex-shrink:0;min-height:3.25rem}.navbar-brand a.navbar-item:focus,.navbar-brand a.navbar-item:hover{background-color:transparent}.navbar-tabs{-webkit-overflow-scrolling:touch;max-width:100vw;overflow-x:auto;overflow-y:hidden}.navbar-burger{color:#4a4a4a;cursor:pointer;display:block;height:3.25rem;position:relative;width:3.25rem;margin-left:auto}.navbar-burger span{background-color:currentColor;display:block;height:1px;left:calc(50% - 
8px);position:absolute;transform-origin:center;transition-duration:86ms;transition-property:background-color,opacity,transform;transition-timing-function:ease-out;width:16px}.navbar-burger span:nth-child(1){top:calc(50% - 6px)}.navbar-burger span:nth-child(2){top:calc(50% - 1px)}.navbar-burger span:nth-child(3){top:calc(50% + 4px)}.navbar-burger:hover{background-color:rgba(0,0,0,.05)}.navbar-burger.is-active span:nth-child(1){transform:translateY(5px) rotate(45deg)}.navbar-burger.is-active span:nth-child(2){opacity:0}.navbar-burger.is-active span:nth-child(3){transform:translateY(-5px) rotate(-45deg)}.navbar-menu{display:none}.navbar-item,.navbar-link{color:#4a4a4a;display:block;line-height:1.5;padding:.5rem .75rem;position:relative}.navbar-item .icon:only-child,.navbar-link .icon:only-child{margin-left:-.25rem;margin-right:-.25rem}.navbar-link,a.navbar-item{cursor:pointer}.navbar-link.is-active,.navbar-link:focus,.navbar-link:focus-within,.navbar-link:hover,a.navbar-item.is-active,a.navbar-item:focus,a.navbar-item:focus-within,a.navbar-item:hover{background-color:#fafafa;color:#3273dc}.navbar-item{flex-grow:0;flex-shrink:0}.navbar-item img{max-height:1.75rem}.navbar-item.has-dropdown{padding:0}.navbar-item.is-expanded{flex-grow:1;flex-shrink:1}.navbar-item.is-tab{border-bottom:1px solid transparent;min-height:3.25rem;padding-bottom:calc(.5rem - 1px)}.navbar-item.is-tab:focus,.navbar-item.is-tab:hover{background-color:transparent;border-bottom-color:#3273dc}.navbar-item.is-tab.is-active{background-color:transparent;border-bottom-color:#3273dc;border-bottom-style:solid;border-bottom-width:3px;color:#3273dc;padding-bottom:calc(.5rem - 3px)}.navbar-content{flex-grow:1;flex-shrink:1}.navbar-link:not(.is-arrowless){padding-right:2.5em}.navbar-link:not(.is-arrowless)::after{border-color:#3273dc;margin-top:-.375em;right:1.125em}.navbar-dropdown{font-size:.875rem;padding-bottom:.5rem;padding-top:.5rem}.navbar-dropdown .navbar-item{padding-left:1.5rem;padding-right:1.5rem}.navbar-divider{background-color:#f5f5f5;border:none;display:none;height:2px;margin:.5rem 0}@media screen and (max-width:1023px){.navbar>.container{display:block}.navbar-brand .navbar-item,.navbar-tabs .navbar-item{align-items:center;display:flex}.navbar-link::after{display:none}.navbar-menu{background-color:#fff;box-shadow:0 8px 16px rgba(10,10,10,.1);padding:.5rem 0}.navbar-menu.is-active{display:block}.navbar.is-fixed-bottom-touch,.navbar.is-fixed-top-touch{left:0;position:fixed;right:0;z-index:30}.navbar.is-fixed-bottom-touch{bottom:0}.navbar.is-fixed-bottom-touch.has-shadow{box-shadow:0 -2px 3px rgba(10,10,10,.1)}.navbar.is-fixed-top-touch{top:0}.navbar.is-fixed-top .navbar-menu,.navbar.is-fixed-top-touch .navbar-menu{-webkit-overflow-scrolling:touch;max-height:calc(100vh - 3.25rem);overflow:auto}body.has-navbar-fixed-top-touch,html.has-navbar-fixed-top-touch{padding-top:3.25rem}body.has-navbar-fixed-bottom-touch,html.has-navbar-fixed-bottom-touch{padding-bottom:3.25rem}}@media screen and (min-width:1024px){.navbar,.navbar-end,.navbar-menu,.navbar-start{align-items:stretch;display:flex}.navbar{min-height:3.25rem}.navbar.is-spaced{padding:1rem 2rem}.navbar.is-spaced .navbar-end,.navbar.is-spaced .navbar-start{align-items:center}.navbar.is-spaced .navbar-link,.navbar.is-spaced a.navbar-item{border-radius:4px}.navbar.is-transparent .navbar-link.is-active,.navbar.is-transparent .navbar-link:focus,.navbar.is-transparent .navbar-link:hover,.navbar.is-transparent a.navbar-item.is-active,.navbar.is-transparent 
a.navbar-item:focus,.navbar.is-transparent a.navbar-item:hover{background-color:transparent!important}.navbar.is-transparent .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-transparent .navbar-item.has-dropdown.is-hoverable:focus .navbar-link,.navbar.is-transparent .navbar-item.has-dropdown.is-hoverable:focus-within .navbar-link,.navbar.is-transparent .navbar-item.has-dropdown.is-hoverable:hover .navbar-link{background-color:transparent!important}.navbar.is-transparent .navbar-dropdown a.navbar-item:focus,.navbar.is-transparent .navbar-dropdown a.navbar-item:hover{background-color:#f5f5f5;color:#0a0a0a}.navbar.is-transparent .navbar-dropdown a.navbar-item.is-active{background-color:#f5f5f5;color:#3273dc}.navbar-burger{display:none}.navbar-item,.navbar-link{align-items:center;display:flex}.navbar-item.has-dropdown{align-items:stretch}.navbar-item.has-dropdown-up .navbar-link::after{transform:rotate(135deg) translate(.25em,-.25em)}.navbar-item.has-dropdown-up .navbar-dropdown{border-bottom:2px solid #dbdbdb;border-radius:6px 6px 0 0;border-top:none;bottom:100%;box-shadow:0 -8px 8px rgba(10,10,10,.1);top:auto}.navbar-item.is-active .navbar-dropdown,.navbar-item.is-hoverable:focus .navbar-dropdown,.navbar-item.is-hoverable:focus-within .navbar-dropdown,.navbar-item.is-hoverable:hover .navbar-dropdown{display:block}.navbar-item.is-active .navbar-dropdown.is-boxed,.navbar-item.is-hoverable:focus .navbar-dropdown.is-boxed,.navbar-item.is-hoverable:focus-within .navbar-dropdown.is-boxed,.navbar-item.is-hoverable:hover .navbar-dropdown.is-boxed,.navbar.is-spaced .navbar-item.is-active .navbar-dropdown,.navbar.is-spaced .navbar-item.is-hoverable:focus .navbar-dropdown,.navbar.is-spaced .navbar-item.is-hoverable:focus-within .navbar-dropdown,.navbar.is-spaced .navbar-item.is-hoverable:hover .navbar-dropdown{opacity:1;pointer-events:auto;transform:translateY(0)}.navbar-menu{flex-grow:1;flex-shrink:0}.navbar-start{justify-content:flex-start;margin-right:auto}.navbar-end{justify-content:flex-end;margin-left:auto}.navbar-dropdown{background-color:#fff;border-bottom-left-radius:6px;border-bottom-right-radius:6px;border-top:2px solid #dbdbdb;box-shadow:0 8px 8px rgba(10,10,10,.1);display:none;font-size:.875rem;left:0;min-width:100%;position:absolute;top:100%;z-index:20}.navbar-dropdown .navbar-item{padding:.375rem 1rem;white-space:nowrap}.navbar-dropdown a.navbar-item{padding-right:3rem}.navbar-dropdown a.navbar-item:focus,.navbar-dropdown a.navbar-item:hover{background-color:#f5f5f5;color:#0a0a0a}.navbar-dropdown a.navbar-item.is-active{background-color:#f5f5f5;color:#3273dc}.navbar-dropdown.is-boxed,.navbar.is-spaced .navbar-dropdown{border-radius:6px;border-top:none;box-shadow:0 8px 8px rgba(10,10,10,.1),0 0 0 1px rgba(10,10,10,.1);display:block;opacity:0;pointer-events:none;top:calc(100% + (-4px));transform:translateY(-5px);transition-duration:86ms;transition-property:opacity,transform}.navbar-dropdown.is-right{left:auto;right:0}.navbar-divider{display:block}.container>.navbar .navbar-brand,.navbar>.container .navbar-brand{margin-left:-.75rem}.container>.navbar .navbar-menu,.navbar>.container .navbar-menu{margin-right:-.75rem}.navbar.is-fixed-bottom-desktop,.navbar.is-fixed-top-desktop{left:0;position:fixed;right:0;z-index:30}.navbar.is-fixed-bottom-desktop{bottom:0}.navbar.is-fixed-bottom-desktop.has-shadow{box-shadow:0 -2px 3px 
rgba(10,10,10,.1)}.navbar.is-fixed-top-desktop{top:0}body.has-navbar-fixed-top-desktop,html.has-navbar-fixed-top-desktop{padding-top:3.25rem}body.has-navbar-fixed-bottom-desktop,html.has-navbar-fixed-bottom-desktop{padding-bottom:3.25rem}body.has-spaced-navbar-fixed-top,html.has-spaced-navbar-fixed-top{padding-top:5.25rem}body.has-spaced-navbar-fixed-bottom,html.has-spaced-navbar-fixed-bottom{padding-bottom:5.25rem}.navbar-link.is-active,a.navbar-item.is-active{color:#0a0a0a}.navbar-link.is-active:not(:focus):not(:hover),a.navbar-item.is-active:not(:focus):not(:hover){background-color:transparent}.navbar-item.has-dropdown.is-active .navbar-link,.navbar-item.has-dropdown:focus .navbar-link,.navbar-item.has-dropdown:hover .navbar-link{background-color:#fafafa}}.hero.is-fullheight-with-navbar{min-height:calc(100vh - 3.25rem)}.pagination{font-size:1rem;margin:-.25rem}.pagination.is-small{font-size:.75rem}.pagination.is-medium{font-size:1.25rem}.pagination.is-large{font-size:1.5rem}.pagination.is-rounded .pagination-next,.pagination.is-rounded .pagination-previous{padding-left:1em;padding-right:1em;border-radius:290486px}.pagination.is-rounded .pagination-link{border-radius:290486px}.pagination,.pagination-list{align-items:center;display:flex;justify-content:center;text-align:center}.pagination-ellipsis,.pagination-link,.pagination-next,.pagination-previous{font-size:1em;justify-content:center;margin:.25rem;padding-left:.5em;padding-right:.5em;text-align:center}.pagination-link,.pagination-next,.pagination-previous{border-color:#dbdbdb;color:#363636;min-width:2.5em}.pagination-link:hover,.pagination-next:hover,.pagination-previous:hover{border-color:#b5b5b5;color:#363636}.pagination-link:focus,.pagination-next:focus,.pagination-previous:focus{border-color:#3273dc}.pagination-link:active,.pagination-next:active,.pagination-previous:active{box-shadow:inset 0 1px 2px rgba(10,10,10,.2)}.pagination-link[disabled],.pagination-next[disabled],.pagination-previous[disabled]{background-color:#dbdbdb;border-color:#dbdbdb;box-shadow:none;color:#7a7a7a;opacity:.5}.pagination-next,.pagination-previous{padding-left:.75em;padding-right:.75em;white-space:nowrap}.pagination-link.is-current{background-color:#3273dc;border-color:#3273dc;color:#fff}.pagination-ellipsis{color:#b5b5b5;pointer-events:none}.pagination-list{flex-wrap:wrap}@media screen and (max-width:768px){.pagination{flex-wrap:wrap}.pagination-next,.pagination-previous{flex-grow:1;flex-shrink:1}.pagination-list li{flex-grow:1;flex-shrink:1}}@media screen and (min-width:769px),print{.pagination-list{flex-grow:1;flex-shrink:1;justify-content:flex-start;order:1}.pagination-previous{order:2}.pagination-next{order:3}.pagination{justify-content:space-between}.pagination.is-centered .pagination-previous{order:1}.pagination.is-centered .pagination-list{justify-content:center;order:2}.pagination.is-centered .pagination-next{order:3}.pagination.is-right .pagination-previous{order:1}.pagination.is-right .pagination-next{order:2}.pagination.is-right .pagination-list{justify-content:flex-end;order:3}}.panel{border-radius:6px;box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 0 0 1px rgba(10,10,10,.02);font-size:1rem}.panel:not(:last-child){margin-bottom:1.5rem}.panel.is-white .panel-heading{background-color:#fff;color:#0a0a0a}.panel.is-white .panel-tabs a.is-active{border-bottom-color:#fff}.panel.is-white .panel-block.is-active .panel-icon{color:#fff}.panel.is-black .panel-heading{background-color:#0a0a0a;color:#fff}.panel.is-black .panel-tabs 
a.is-active{border-bottom-color:#0a0a0a}.panel.is-black .panel-block.is-active .panel-icon{color:#0a0a0a}.panel.is-light .panel-heading{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.panel.is-light .panel-tabs a.is-active{border-bottom-color:#f5f5f5}.panel.is-light .panel-block.is-active .panel-icon{color:#f5f5f5}.panel.is-dark .panel-heading{background-color:#363636;color:#fff}.panel.is-dark .panel-tabs a.is-active{border-bottom-color:#363636}.panel.is-dark .panel-block.is-active .panel-icon{color:#363636}.panel.is-primary .panel-heading{background-color:#00d1b2;color:#fff}.panel.is-primary .panel-tabs a.is-active{border-bottom-color:#00d1b2}.panel.is-primary .panel-block.is-active .panel-icon{color:#00d1b2}.panel.is-link .panel-heading{background-color:#3273dc;color:#fff}.panel.is-link .panel-tabs a.is-active{border-bottom-color:#3273dc}.panel.is-link .panel-block.is-active .panel-icon{color:#3273dc}.panel.is-info .panel-heading{background-color:#3298dc;color:#fff}.panel.is-info .panel-tabs a.is-active{border-bottom-color:#3298dc}.panel.is-info .panel-block.is-active .panel-icon{color:#3298dc}.panel.is-success .panel-heading{background-color:#48c774;color:#fff}.panel.is-success .panel-tabs a.is-active{border-bottom-color:#48c774}.panel.is-success .panel-block.is-active .panel-icon{color:#48c774}.panel.is-warning .panel-heading{background-color:#ffdd57;color:rgba(0,0,0,.7)}.panel.is-warning .panel-tabs a.is-active{border-bottom-color:#ffdd57}.panel.is-warning .panel-block.is-active .panel-icon{color:#ffdd57}.panel.is-danger .panel-heading{background-color:#f14668;color:#fff}.panel.is-danger .panel-tabs a.is-active{border-bottom-color:#f14668}.panel.is-danger .panel-block.is-active .panel-icon{color:#f14668}.panel-block:not(:last-child),.panel-tabs:not(:last-child){border-bottom:1px solid #ededed}.panel-heading{background-color:#ededed;border-radius:6px 6px 0 0;color:#363636;font-size:1.25em;font-weight:700;line-height:1.25;padding:.75em 1em}.panel-tabs{align-items:flex-end;display:flex;font-size:.875em;justify-content:center}.panel-tabs a{border-bottom:1px solid #dbdbdb;margin-bottom:-1px;padding:.5em}.panel-tabs a.is-active{border-bottom-color:#4a4a4a;color:#363636}.panel-list a{color:#4a4a4a}.panel-list a:hover{color:#3273dc}.panel-block{align-items:center;color:#363636;display:flex;justify-content:flex-start;padding:.5em .75em}.panel-block input[type=checkbox]{margin-right:.75em}.panel-block>.control{flex-grow:1;flex-shrink:1;width:100%}.panel-block.is-wrapped{flex-wrap:wrap}.panel-block.is-active{border-left-color:#3273dc;color:#363636}.panel-block.is-active .panel-icon{color:#3273dc}.panel-block:last-child{border-bottom-left-radius:6px;border-bottom-right-radius:6px}a.panel-block,label.panel-block{cursor:pointer}a.panel-block:hover,label.panel-block:hover{background-color:#f5f5f5}.panel-icon{display:inline-block;font-size:14px;height:1em;line-height:1em;text-align:center;vertical-align:top;width:1em;color:#7a7a7a;margin-right:.75em}.panel-icon .fa{font-size:inherit;line-height:inherit}.tabs{-webkit-overflow-scrolling:touch;align-items:stretch;display:flex;font-size:1rem;justify-content:space-between;overflow:hidden;overflow-x:auto;white-space:nowrap}.tabs a{align-items:center;border-bottom-color:#dbdbdb;border-bottom-style:solid;border-bottom-width:1px;color:#4a4a4a;display:flex;justify-content:center;margin-bottom:-1px;padding:.5em 1em;vertical-align:top}.tabs a:hover{border-bottom-color:#363636;color:#363636}.tabs li{display:block}.tabs li.is-active 
a{border-bottom-color:#3273dc;color:#3273dc}.tabs ul{align-items:center;border-bottom-color:#dbdbdb;border-bottom-style:solid;border-bottom-width:1px;display:flex;flex-grow:1;flex-shrink:0;justify-content:flex-start}.tabs ul.is-left{padding-right:.75em}.tabs ul.is-center{flex:none;justify-content:center;padding-left:.75em;padding-right:.75em}.tabs ul.is-right{justify-content:flex-end;padding-left:.75em}.tabs .icon:first-child{margin-right:.5em}.tabs .icon:last-child{margin-left:.5em}.tabs.is-centered ul{justify-content:center}.tabs.is-right ul{justify-content:flex-end}.tabs.is-boxed a{border:1px solid transparent;border-radius:4px 4px 0 0}.tabs.is-boxed a:hover{background-color:#f5f5f5;border-bottom-color:#dbdbdb}.tabs.is-boxed li.is-active a{background-color:#fff;border-color:#dbdbdb;border-bottom-color:transparent!important}.tabs.is-fullwidth li{flex-grow:1;flex-shrink:0}.tabs.is-toggle a{border-color:#dbdbdb;border-style:solid;border-width:1px;margin-bottom:0;position:relative}.tabs.is-toggle a:hover{background-color:#f5f5f5;border-color:#b5b5b5;z-index:2}.tabs.is-toggle li+li{margin-left:-1px}.tabs.is-toggle li:first-child a{border-top-left-radius:4px;border-bottom-left-radius:4px}.tabs.is-toggle li:last-child a{border-top-right-radius:4px;border-bottom-right-radius:4px}.tabs.is-toggle li.is-active a{background-color:#3273dc;border-color:#3273dc;color:#fff;z-index:1}.tabs.is-toggle ul{border-bottom:none}.tabs.is-toggle.is-toggle-rounded li:first-child a{border-bottom-left-radius:290486px;border-top-left-radius:290486px;padding-left:1.25em}.tabs.is-toggle.is-toggle-rounded li:last-child a{border-bottom-right-radius:290486px;border-top-right-radius:290486px;padding-right:1.25em}.tabs.is-small{font-size:.75rem}.tabs.is-medium{font-size:1.25rem}.tabs.is-large{font-size:1.5rem}.column{display:block;flex-basis:0;flex-grow:1;flex-shrink:1;padding:.75rem}.columns.is-mobile>.column.is-narrow{flex:none}.columns.is-mobile>.column.is-full{flex:none;width:100%}.columns.is-mobile>.column.is-three-quarters{flex:none;width:75%}.columns.is-mobile>.column.is-two-thirds{flex:none;width:66.6666%}.columns.is-mobile>.column.is-half{flex:none;width:50%}.columns.is-mobile>.column.is-one-third{flex:none;width:33.3333%}.columns.is-mobile>.column.is-one-quarter{flex:none;width:25%}.columns.is-mobile>.column.is-one-fifth{flex:none;width:20%}.columns.is-mobile>.column.is-two-fifths{flex:none;width:40%}.columns.is-mobile>.column.is-three-fifths{flex:none;width:60%}.columns.is-mobile>.column.is-four-fifths{flex:none;width:80%}.columns.is-mobile>.column.is-offset-three-quarters{margin-left:75%}.columns.is-mobile>.column.is-offset-two-thirds{margin-left:66.6666%}.columns.is-mobile>.column.is-offset-half{margin-left:50%}.columns.is-mobile>.column.is-offset-one-third{margin-left:33.3333%}.columns.is-mobile>.column.is-offset-one-quarter{margin-left:25%}.columns.is-mobile>.column.is-offset-one-fifth{margin-left:20%}.columns.is-mobile>.column.is-offset-two-fifths{margin-left:40%}.columns.is-mobile>.column.is-offset-three-fifths{margin-left:60%}.columns.is-mobile>.column.is-offset-four-fifths{margin-left:80%}.columns.is-mobile>.column.is-0{flex:none;width:0%}.columns.is-mobile>.column.is-offset-0{margin-left:0}.columns.is-mobile>.column.is-1{flex:none;width:8.33333%}.columns.is-mobile>.column.is-offset-1{margin-left:8.33333%}.columns.is-mobile>.column.is-2{flex:none;width:16.66667%}.columns.is-mobile>.column.is-offset-2{margin-left:16.66667%}.columns.is-mobile>.column.is-3{flex:none;width:25%}
.columns.is-mobile>.column.is-offset-3{margin-left:25%}.columns.is-mobile>.column.is-4{flex:none;width:33.33333%}.columns.is-mobile>.column.is-offset-4{margin-left:33.33333%}.columns.is-mobile>.column.is-5{flex:none;width:41.66667%}.columns.is-mobile>.column.is-offset-5{margin-left:41.66667%}.columns.is-mobile>.column.is-6{flex:none;width:50%}.columns.is-mobile>.column.is-offset-6{margin-left:50%}.columns.is-mobile>.column.is-7{flex:none;width:58.33333%}.columns.is-mobile>.column.is-offset-7{margin-left:58.33333%}.columns.is-mobile>.column.is-8{flex:none;width:66.66667%}.columns.is-mobile>.column.is-offset-8{margin-left:66.66667%}.columns.is-mobile>.column.is-9{flex:none;width:75%}.columns.is-mobile>.column.is-offset-9{margin-left:75%}.columns.is-mobile>.column.is-10{flex:none;width:83.33333%}.columns.is-mobile>.column.is-offset-10{margin-left:83.33333%}.columns.is-mobile>.column.is-11{flex:none;width:91.66667%}.columns.is-mobile>.column.is-offset-11{margin-left:91.66667%}.columns.is-mobile>.column.is-12{flex:none;width:100%}.columns.is-mobile>.column.is-offset-12{margin-left:100%}@media screen and (max-width:768px){.column.is-narrow-mobile{flex:none}.column.is-full-mobile{flex:none;width:100%}.column.is-three-quarters-mobile{flex:none;width:75%}.column.is-two-thirds-mobile{flex:none;width:66.6666%}.column.is-half-mobile{flex:none;width:50%}.column.is-one-third-mobile{flex:none;width:33.3333%}.column.is-one-quarter-mobile{flex:none;width:25%}.column.is-one-fifth-mobile{flex:none;width:20%}.column.is-two-fifths-mobile{flex:none;width:40%}.column.is-three-fifths-mobile{flex:none;width:60%}.column.is-four-fifths-mobile{flex:none;width:80%}.column.is-offset-three-quarters-mobile{margin-left:75%}.column.is-offset-two-thirds-mobile{margin-left:66.6666%}.column.is-offset-half-mobile{margin-left:50%}.column.is-offset-one-third-mobile{margin-left:33.3333%}.column.is-offset-one-quarter-mobile{margin-left:25%}.column.is-offset-one-fifth-mobile{margin-left:20%}.column.is-offset-two-fifths-mobile{margin-left:40%}.column.is-offset-three-fifths-mobile{margin-left:60%}.column.is-offset-four-fifths-mobile{margin-left:80%}.column.is-0-mobile{flex:none;width:0%}.column.is-offset-0-mobile{margin-left:0}.column.is-1-mobile{flex:none;width:8.33333%}.column.is-offset-1-mobile{margin-left:8.33333%}.column.is-2-mobile{flex:none;width:16.66667%}.column.is-offset-2-mobile{margin-left:16.66667%}.column.is-3-mobile{flex:none;width:25%}.column.is-offset-3-mobile{margin-left:25%}.column.is-4-mobile{flex:none;width:33.33333%}.column.is-offset-4-mobile{margin-left:33.33333%}.column.is-5-mobile{flex:none;width:41.66667%}.column.is-offset-5-mobile{margin-left:41.66667%}.column.is-6-mobile{flex:none;width:50%}.column.is-offset-6-mobile{margin-left:50%}.column.is-7-mobile{flex:none;width:58.33333%}.column.is-offset-7-mobile{margin-left:58.33333%}.column.is-8-mobile{flex:none;width:66.66667%}.column.is-offset-8-mobile{margin-left:66.66667%}.column.is-9-mobile{flex:none;width:75%}.column.is-offset-9-mobile{margin-left:75%}.column.is-10-mobile{flex:none;width:83.33333%}.column.is-offset-10-mobile{margin-left:83.33333%}.column.is-11-mobile{flex:none;width:91.66667%}.column.is-offset-11-mobile{margin-left:91.66667%}.column.is-12-mobile{flex:none;width:100%}.column.is-offset-12-mobile{margin-left:100%}}@media screen and
(min-width:769px),print{.column.is-narrow,.column.is-narrow-tablet{flex:none}.column.is-full,.column.is-full-tablet{flex:none;width:100%}.column.is-three-quarters,.column.is-three-quarters-tablet{flex:none;width:75%}.column.is-two-thirds,.column.is-two-thirds-tablet{flex:none;width:66.6666%}.column.is-half,.column.is-half-tablet{flex:none;width:50%}.column.is-one-third,.column.is-one-third-tablet{flex:none;width:33.3333%}.column.is-one-quarter,.column.is-one-quarter-tablet{flex:none;width:25%}.column.is-one-fifth,.column.is-one-fifth-tablet{flex:none;width:20%}.column.is-two-fifths,.column.is-two-fifths-tablet{flex:none;width:40%}.column.is-three-fifths,.column.is-three-fifths-tablet{flex:none;width:60%}.column.is-four-fifths,.column.is-four-fifths-tablet{flex:none;width:80%}.column.is-offset-three-quarters,.column.is-offset-three-quarters-tablet{margin-left:75%}.column.is-offset-two-thirds,.column.is-offset-two-thirds-tablet{margin-left:66.6666%}.column.is-offset-half,.column.is-offset-half-tablet{margin-left:50%}.column.is-offset-one-third,.column.is-offset-one-third-tablet{margin-left:33.3333%}.column.is-offset-one-quarter,.column.is-offset-one-quarter-tablet{margin-left:25%}.column.is-offset-one-fifth,.column.is-offset-one-fifth-tablet{margin-left:20%}.column.is-offset-two-fifths,.column.is-offset-two-fifths-tablet{margin-left:40%}.column.is-offset-three-fifths,.column.is-offset-three-fifths-tablet{margin-left:60%}.column.is-offset-four-fifths,.column.is-offset-four-fifths-tablet{margin-left:80%}.column.is-0,.column.is-0-tablet{flex:none;width:0%}.column.is-offset-0,.column.is-offset-0-tablet{margin-left:0}.column.is-1,.column.is-1-tablet{flex:none;width:8.33333%}.column.is-offset-1,.column.is-offset-1-tablet{margin-left:8.33333%}.column.is-2,.column.is-2-tablet{flex:none;width:16.66667%}.column.is-offset-2,.column.is-offset-2-tablet{margin-left:16.66667%}.column.is-3,.column.is-3-tablet{flex:none;width:25%}.column.is-offset-3,.column.is-offset-3-tablet{margin-left:25%}.column.is-4,.column.is-4-tablet{flex:none;width:33.33333%}.column.is-offset-4,.column.is-offset-4-tablet{margin-left:33.33333%}.column.is-5,.column.is-5-tablet{flex:none;width:41.66667%}.column.is-offset-5,.column.is-offset-5-tablet{margin-left:41.66667%}.column.is-6,.column.is-6-tablet{flex:none;width:50%}.column.is-offset-6,.column.is-offset-6-tablet{margin-left:50%}.column.is-7,.column.is-7-tablet{flex:none;width:58.33333%}.column.is-offset-7,.column.is-offset-7-tablet{margin-left:58.33333%}.column.is-8,.column.is-8-tablet{flex:none;width:66.66667%}.column.is-offset-8,.column.is-offset-8-tablet{margin-left:66.66667%}.column.is-9,.column.is-9-tablet{flex:none;width:75%}.column.is-offset-9,.column.is-offset-9-tablet{margin-left:75%}.column.is-10,.column.is-10-tablet{flex:none;width:83.33333%}.column.is-offset-10,.column.is-offset-10-tablet{margin-left:83.33333%}.column.is-11,.column.is-11-tablet{flex:none;width:91.66667%}.column.is-offset-11,.column.is-offset-11-tablet{margin-left:91.66667%}.column.is-12,.column.is-12-tablet{flex:none;width:100%}.column.is-offset-12,.column.is-offset-12-tablet{margin-left:100%}}@media screen and 
(max-width:1023px){.column.is-narrow-touch{flex:none}.column.is-full-touch{flex:none;width:100%}.column.is-three-quarters-touch{flex:none;width:75%}.column.is-two-thirds-touch{flex:none;width:66.6666%}.column.is-half-touch{flex:none;width:50%}.column.is-one-third-touch{flex:none;width:33.3333%}.column.is-one-quarter-touch{flex:none;width:25%}.column.is-one-fifth-touch{flex:none;width:20%}.column.is-two-fifths-touch{flex:none;width:40%}.column.is-three-fifths-touch{flex:none;width:60%}.column.is-four-fifths-touch{flex:none;width:80%}.column.is-offset-three-quarters-touch{margin-left:75%}.column.is-offset-two-thirds-touch{margin-left:66.6666%}.column.is-offset-half-touch{margin-left:50%}.column.is-offset-one-third-touch{margin-left:33.3333%}.column.is-offset-one-quarter-touch{margin-left:25%}.column.is-offset-one-fifth-touch{margin-left:20%}.column.is-offset-two-fifths-touch{margin-left:40%}.column.is-offset-three-fifths-touch{margin-left:60%}.column.is-offset-four-fifths-touch{margin-left:80%}.column.is-0-touch{flex:none;width:0%}.column.is-offset-0-touch{margin-left:0}.column.is-1-touch{flex:none;width:8.33333%}.column.is-offset-1-touch{margin-left:8.33333%}.column.is-2-touch{flex:none;width:16.66667%}.column.is-offset-2-touch{margin-left:16.66667%}.column.is-3-touch{flex:none;width:25%}.column.is-offset-3-touch{margin-left:25%}.column.is-4-touch{flex:none;width:33.33333%}.column.is-offset-4-touch{margin-left:33.33333%}.column.is-5-touch{flex:none;width:41.66667%}.column.is-offset-5-touch{margin-left:41.66667%}.column.is-6-touch{flex:none;width:50%}.column.is-offset-6-touch{margin-left:50%}.column.is-7-touch{flex:none;width:58.33333%}.column.is-offset-7-touch{margin-left:58.33333%}.column.is-8-touch{flex:none;width:66.66667%}.column.is-offset-8-touch{margin-left:66.66667%}.column.is-9-touch{flex:none;width:75%}.column.is-offset-9-touch{margin-left:75%}.column.is-10-touch{flex:none;width:83.33333%}.column.is-offset-10-touch{margin-left:83.33333%}.column.is-11-touch{flex:none;width:91.66667%}.column.is-offset-11-touch{margin-left:91.66667%}.column.is-12-touch{flex:none;width:100%}.column.is-offset-12-touch{margin-left:100%}}@media screen and 
(min-width:1024px){.column.is-narrow-desktop{flex:none}.column.is-full-desktop{flex:none;width:100%}.column.is-three-quarters-desktop{flex:none;width:75%}.column.is-two-thirds-desktop{flex:none;width:66.6666%}.column.is-half-desktop{flex:none;width:50%}.column.is-one-third-desktop{flex:none;width:33.3333%}.column.is-one-quarter-desktop{flex:none;width:25%}.column.is-one-fifth-desktop{flex:none;width:20%}.column.is-two-fifths-desktop{flex:none;width:40%}.column.is-three-fifths-desktop{flex:none;width:60%}.column.is-four-fifths-desktop{flex:none;width:80%}.column.is-offset-three-quarters-desktop{margin-left:75%}.column.is-offset-two-thirds-desktop{margin-left:66.6666%}.column.is-offset-half-desktop{margin-left:50%}.column.is-offset-one-third-desktop{margin-left:33.3333%}.column.is-offset-one-quarter-desktop{margin-left:25%}.column.is-offset-one-fifth-desktop{margin-left:20%}.column.is-offset-two-fifths-desktop{margin-left:40%}.column.is-offset-three-fifths-desktop{margin-left:60%}.column.is-offset-four-fifths-desktop{margin-left:80%}.column.is-0-desktop{flex:none;width:0%}.column.is-offset-0-desktop{margin-left:0}.column.is-1-desktop{flex:none;width:8.33333%}.column.is-offset-1-desktop{margin-left:8.33333%}.column.is-2-desktop{flex:none;width:16.66667%}.column.is-offset-2-desktop{margin-left:16.66667%}.column.is-3-desktop{flex:none;width:25%}.column.is-offset-3-desktop{margin-left:25%}.column.is-4-desktop{flex:none;width:33.33333%}.column.is-offset-4-desktop{margin-left:33.33333%}.column.is-5-desktop{flex:none;width:41.66667%}.column.is-offset-5-desktop{margin-left:41.66667%}.column.is-6-desktop{flex:none;width:50%}.column.is-offset-6-desktop{margin-left:50%}.column.is-7-desktop{flex:none;width:58.33333%}.column.is-offset-7-desktop{margin-left:58.33333%}.column.is-8-desktop{flex:none;width:66.66667%}.column.is-offset-8-desktop{margin-left:66.66667%}.column.is-9-desktop{flex:none;width:75%}.column.is-offset-9-desktop{margin-left:75%}.column.is-10-desktop{flex:none;width:83.33333%}.column.is-offset-10-desktop{margin-left:83.33333%}.column.is-11-desktop{flex:none;width:91.66667%}.column.is-offset-11-desktop{margin-left:91.66667%}.column.is-12-desktop{flex:none;width:100%}.column.is-offset-12-desktop{margin-left:100%}}@media screen and 
(min-width:1216px){.column.is-narrow-widescreen{flex:none}.column.is-full-widescreen{flex:none;width:100%}.column.is-three-quarters-widescreen{flex:none;width:75%}.column.is-two-thirds-widescreen{flex:none;width:66.6666%}.column.is-half-widescreen{flex:none;width:50%}.column.is-one-third-widescreen{flex:none;width:33.3333%}.column.is-one-quarter-widescreen{flex:none;width:25%}.column.is-one-fifth-widescreen{flex:none;width:20%}.column.is-two-fifths-widescreen{flex:none;width:40%}.column.is-three-fifths-widescreen{flex:none;width:60%}.column.is-four-fifths-widescreen{flex:none;width:80%}.column.is-offset-three-quarters-widescreen{margin-left:75%}.column.is-offset-two-thirds-widescreen{margin-left:66.6666%}.column.is-offset-half-widescreen{margin-left:50%}.column.is-offset-one-third-widescreen{margin-left:33.3333%}.column.is-offset-one-quarter-widescreen{margin-left:25%}.column.is-offset-one-fifth-widescreen{margin-left:20%}.column.is-offset-two-fifths-widescreen{margin-left:40%}.column.is-offset-three-fifths-widescreen{margin-left:60%}.column.is-offset-four-fifths-widescreen{margin-left:80%}.column.is-0-widescreen{flex:none;width:0%}.column.is-offset-0-widescreen{margin-left:0}.column.is-1-widescreen{flex:none;width:8.33333%}.column.is-offset-1-widescreen{margin-left:8.33333%}.column.is-2-widescreen{flex:none;width:16.66667%}.column.is-offset-2-widescreen{margin-left:16.66667%}.column.is-3-widescreen{flex:none;width:25%}.column.is-offset-3-widescreen{margin-left:25%}.column.is-4-widescreen{flex:none;width:33.33333%}.column.is-offset-4-widescreen{margin-left:33.33333%}.column.is-5-widescreen{flex:none;width:41.66667%}.column.is-offset-5-widescreen{margin-left:41.66667%}.column.is-6-widescreen{flex:none;width:50%}.column.is-offset-6-widescreen{margin-left:50%}.column.is-7-widescreen{flex:none;width:58.33333%}.column.is-offset-7-widescreen{margin-left:58.33333%}.column.is-8-widescreen{flex:none;width:66.66667%}.column.is-offset-8-widescreen{margin-left:66.66667%}.column.is-9-widescreen{flex:none;width:75%}.column.is-offset-9-widescreen{margin-left:75%}.column.is-10-widescreen{flex:none;width:83.33333%}.column.is-offset-10-widescreen{margin-left:83.33333%}.column.is-11-widescreen{flex:none;width:91.66667%}.column.is-offset-11-widescreen{margin-left:91.66667%}.column.is-12-widescreen{flex:none;width:100%}.column.is-offset-12-widescreen{margin-left:100%}}@media screen and 
(min-width:1408px){.column.is-narrow-fullhd{flex:none}.column.is-full-fullhd{flex:none;width:100%}.column.is-three-quarters-fullhd{flex:none;width:75%}.column.is-two-thirds-fullhd{flex:none;width:66.6666%}.column.is-half-fullhd{flex:none;width:50%}.column.is-one-third-fullhd{flex:none;width:33.3333%}.column.is-one-quarter-fullhd{flex:none;width:25%}.column.is-one-fifth-fullhd{flex:none;width:20%}.column.is-two-fifths-fullhd{flex:none;width:40%}.column.is-three-fifths-fullhd{flex:none;width:60%}.column.is-four-fifths-fullhd{flex:none;width:80%}.column.is-offset-three-quarters-fullhd{margin-left:75%}.column.is-offset-two-thirds-fullhd{margin-left:66.6666%}.column.is-offset-half-fullhd{margin-left:50%}.column.is-offset-one-third-fullhd{margin-left:33.3333%}.column.is-offset-one-quarter-fullhd{margin-left:25%}.column.is-offset-one-fifth-fullhd{margin-left:20%}.column.is-offset-two-fifths-fullhd{margin-left:40%}.column.is-offset-three-fifths-fullhd{margin-left:60%}.column.is-offset-four-fifths-fullhd{margin-left:80%}.column.is-0-fullhd{flex:none;width:0%}.column.is-offset-0-fullhd{margin-left:0}.column.is-1-fullhd{flex:none;width:8.33333%}.column.is-offset-1-fullhd{margin-left:8.33333%}.column.is-2-fullhd{flex:none;width:16.66667%}.column.is-offset-2-fullhd{margin-left:16.66667%}.column.is-3-fullhd{flex:none;width:25%}.column.is-offset-3-fullhd{margin-left:25%}.column.is-4-fullhd{flex:none;width:33.33333%}.column.is-offset-4-fullhd{margin-left:33.33333%}.column.is-5-fullhd{flex:none;width:41.66667%}.column.is-offset-5-fullhd{margin-left:41.66667%}.column.is-6-fullhd{flex:none;width:50%}.column.is-offset-6-fullhd{margin-left:50%}.column.is-7-fullhd{flex:none;width:58.33333%}.column.is-offset-7-fullhd{margin-left:58.33333%}.column.is-8-fullhd{flex:none;width:66.66667%}.column.is-offset-8-fullhd{margin-left:66.66667%}.column.is-9-fullhd{flex:none;width:75%}.column.is-offset-9-fullhd{margin-left:75%}.column.is-10-fullhd{flex:none;width:83.33333%}.column.is-offset-10-fullhd{margin-left:83.33333%}.column.is-11-fullhd{flex:none;width:91.66667%}.column.is-offset-11-fullhd{margin-left:91.66667%}.column.is-12-fullhd{flex:none;width:100%}.column.is-offset-12-fullhd{margin-left:100%}}.columns{margin-left:-.75rem;margin-right:-.75rem;margin-top:-.75rem}.columns:last-child{margin-bottom:-.75rem}.columns:not(:last-child){margin-bottom:calc(1.5rem - .75rem)}.columns.is-centered{justify-content:center}.columns.is-gapless{margin-left:0;margin-right:0;margin-top:0}.columns.is-gapless>.column{margin:0;padding:0!important}.columns.is-gapless:not(:last-child){margin-bottom:1.5rem}.columns.is-gapless:last-child{margin-bottom:0}.columns.is-mobile{display:flex}.columns.is-multiline{flex-wrap:wrap}.columns.is-vcentered{align-items:center}@media screen and (min-width:769px),print{.columns:not(.is-desktop){display:flex}}@media screen and (min-width:1024px){.columns.is-desktop{display:flex}}.columns.is-variable{--columnGap:0.75rem;margin-left:calc(-1 * var(--columnGap));margin-right:calc(-1 * var(--columnGap))}.columns.is-variable .column{padding-left:var(--columnGap);padding-right:var(--columnGap)}.columns.is-variable.is-0{--columnGap:0rem}@media screen and (max-width:768px){.columns.is-variable.is-0-mobile{--columnGap:0rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-0-tablet{--columnGap:0rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-0-tablet-only{--columnGap:0rem}}@media screen and (max-width:1023px){.columns.is-variable.is-0-touch{--columnGap:0rem}}@media 
screen and (min-width:1024px){.columns.is-variable.is-0-desktop{--columnGap:0rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-0-desktop-only{--columnGap:0rem}}@media screen and (min-width:1216px){.columns.is-variable.is-0-widescreen{--columnGap:0rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-0-widescreen-only{--columnGap:0rem}}@media screen and (min-width:1408px){.columns.is-variable.is-0-fullhd{--columnGap:0rem}}.columns.is-variable.is-1{--columnGap:0.25rem}@media screen and (max-width:768px){.columns.is-variable.is-1-mobile{--columnGap:0.25rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-1-tablet{--columnGap:0.25rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-1-tablet-only{--columnGap:0.25rem}}@media screen and (max-width:1023px){.columns.is-variable.is-1-touch{--columnGap:0.25rem}}@media screen and (min-width:1024px){.columns.is-variable.is-1-desktop{--columnGap:0.25rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-1-desktop-only{--columnGap:0.25rem}}@media screen and (min-width:1216px){.columns.is-variable.is-1-widescreen{--columnGap:0.25rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-1-widescreen-only{--columnGap:0.25rem}}@media screen and (min-width:1408px){.columns.is-variable.is-1-fullhd{--columnGap:0.25rem}}.columns.is-variable.is-2{--columnGap:0.5rem}@media screen and (max-width:768px){.columns.is-variable.is-2-mobile{--columnGap:0.5rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-2-tablet{--columnGap:0.5rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-2-tablet-only{--columnGap:0.5rem}}@media screen and (max-width:1023px){.columns.is-variable.is-2-touch{--columnGap:0.5rem}}@media screen and (min-width:1024px){.columns.is-variable.is-2-desktop{--columnGap:0.5rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-2-desktop-only{--columnGap:0.5rem}}@media screen and (min-width:1216px){.columns.is-variable.is-2-widescreen{--columnGap:0.5rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-2-widescreen-only{--columnGap:0.5rem}}@media screen and (min-width:1408px){.columns.is-variable.is-2-fullhd{--columnGap:0.5rem}}.columns.is-variable.is-3{--columnGap:0.75rem}@media screen and (max-width:768px){.columns.is-variable.is-3-mobile{--columnGap:0.75rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-3-tablet{--columnGap:0.75rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-3-tablet-only{--columnGap:0.75rem}}@media screen and (max-width:1023px){.columns.is-variable.is-3-touch{--columnGap:0.75rem}}@media screen and (min-width:1024px){.columns.is-variable.is-3-desktop{--columnGap:0.75rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-3-desktop-only{--columnGap:0.75rem}}@media screen and (min-width:1216px){.columns.is-variable.is-3-widescreen{--columnGap:0.75rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-3-widescreen-only{--columnGap:0.75rem}}@media screen and (min-width:1408px){.columns.is-variable.is-3-fullhd{--columnGap:0.75rem}}.columns.is-variable.is-4{--columnGap:1rem}@media screen and (max-width:768px){.columns.is-variable.is-4-mobile{--columnGap:1rem}}@media screen and 
(min-width:769px),print{.columns.is-variable.is-4-tablet{--columnGap:1rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-4-tablet-only{--columnGap:1rem}}@media screen and (max-width:1023px){.columns.is-variable.is-4-touch{--columnGap:1rem}}@media screen and (min-width:1024px){.columns.is-variable.is-4-desktop{--columnGap:1rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-4-desktop-only{--columnGap:1rem}}@media screen and (min-width:1216px){.columns.is-variable.is-4-widescreen{--columnGap:1rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-4-widescreen-only{--columnGap:1rem}}@media screen and (min-width:1408px){.columns.is-variable.is-4-fullhd{--columnGap:1rem}}.columns.is-variable.is-5{--columnGap:1.25rem}@media screen and (max-width:768px){.columns.is-variable.is-5-mobile{--columnGap:1.25rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-5-tablet{--columnGap:1.25rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-5-tablet-only{--columnGap:1.25rem}}@media screen and (max-width:1023px){.columns.is-variable.is-5-touch{--columnGap:1.25rem}}@media screen and (min-width:1024px){.columns.is-variable.is-5-desktop{--columnGap:1.25rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-5-desktop-only{--columnGap:1.25rem}}@media screen and (min-width:1216px){.columns.is-variable.is-5-widescreen{--columnGap:1.25rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-5-widescreen-only{--columnGap:1.25rem}}@media screen and (min-width:1408px){.columns.is-variable.is-5-fullhd{--columnGap:1.25rem}}.columns.is-variable.is-6{--columnGap:1.5rem}@media screen and (max-width:768px){.columns.is-variable.is-6-mobile{--columnGap:1.5rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-6-tablet{--columnGap:1.5rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-6-tablet-only{--columnGap:1.5rem}}@media screen and (max-width:1023px){.columns.is-variable.is-6-touch{--columnGap:1.5rem}}@media screen and (min-width:1024px){.columns.is-variable.is-6-desktop{--columnGap:1.5rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-6-desktop-only{--columnGap:1.5rem}}@media screen and (min-width:1216px){.columns.is-variable.is-6-widescreen{--columnGap:1.5rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-6-widescreen-only{--columnGap:1.5rem}}@media screen and (min-width:1408px){.columns.is-variable.is-6-fullhd{--columnGap:1.5rem}}.columns.is-variable.is-7{--columnGap:1.75rem}@media screen and (max-width:768px){.columns.is-variable.is-7-mobile{--columnGap:1.75rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-7-tablet{--columnGap:1.75rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-7-tablet-only{--columnGap:1.75rem}}@media screen and (max-width:1023px){.columns.is-variable.is-7-touch{--columnGap:1.75rem}}@media screen and (min-width:1024px){.columns.is-variable.is-7-desktop{--columnGap:1.75rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-7-desktop-only{--columnGap:1.75rem}}@media screen and (min-width:1216px){.columns.is-variable.is-7-widescreen{--columnGap:1.75rem}}@media screen and (min-width:1216px) and 
(max-width:1407px){.columns.is-variable.is-7-widescreen-only{--columnGap:1.75rem}}@media screen and (min-width:1408px){.columns.is-variable.is-7-fullhd{--columnGap:1.75rem}}.columns.is-variable.is-8{--columnGap:2rem}@media screen and (max-width:768px){.columns.is-variable.is-8-mobile{--columnGap:2rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-8-tablet{--columnGap:2rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-8-tablet-only{--columnGap:2rem}}@media screen and (max-width:1023px){.columns.is-variable.is-8-touch{--columnGap:2rem}}@media screen and (min-width:1024px){.columns.is-variable.is-8-desktop{--columnGap:2rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-8-desktop-only{--columnGap:2rem}}@media screen and (min-width:1216px){.columns.is-variable.is-8-widescreen{--columnGap:2rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-8-widescreen-only{--columnGap:2rem}}@media screen and (min-width:1408px){.columns.is-variable.is-8-fullhd{--columnGap:2rem}}.tile{align-items:stretch;display:block;flex-basis:0;flex-grow:1;flex-shrink:1;min-height:-webkit-min-content;min-height:-moz-min-content;min-height:min-content}.tile.is-ancestor{margin-left:-.75rem;margin-right:-.75rem;margin-top:-.75rem}.tile.is-ancestor:last-child{margin-bottom:-.75rem}.tile.is-ancestor:not(:last-child){margin-bottom:.75rem}.tile.is-child{margin:0!important}.tile.is-parent{padding:.75rem}.tile.is-vertical{flex-direction:column}.tile.is-vertical>.tile.is-child:not(:last-child){margin-bottom:1.5rem!important}@media screen and (min-width:769px),print{.tile:not(.is-child){display:flex}.tile.is-1{flex:none;width:8.33333%}.tile.is-2{flex:none;width:16.66667%}.tile.is-3{flex:none;width:25%}.tile.is-4{flex:none;width:33.33333%}.tile.is-5{flex:none;width:41.66667%}.tile.is-6{flex:none;width:50%}.tile.is-7{flex:none;width:58.33333%}.tile.is-8{flex:none;width:66.66667%}.tile.is-9{flex:none;width:75%}.tile.is-10{flex:none;width:83.33333%}.tile.is-11{flex:none;width:91.66667%}.tile.is-12{flex:none;width:100%}}.has-text-white{color:#fff!important}a.has-text-white:focus,a.has-text-white:hover{color:#e6e6e6!important}.has-background-white{background-color:#fff!important}.has-text-black{color:#0a0a0a!important}a.has-text-black:focus,a.has-text-black:hover{color:#000!important}.has-background-black{background-color:#0a0a0a!important}.has-text-light{color:#f5f5f5!important}a.has-text-light:focus,a.has-text-light:hover{color:#dbdbdb!important}.has-background-light{background-color:#f5f5f5!important}.has-text-dark{color:#363636!important}a.has-text-dark:focus,a.has-text-dark:hover{color:#1c1c1c!important}.has-background-dark{background-color:#363636!important}.has-text-primary{color:#00d1b2!important}a.has-text-primary:focus,a.has-text-primary:hover{color:#009e86!important}.has-background-primary{background-color:#00d1b2!important}.has-text-primary-light{color:#ebfffc!important}a.has-text-primary-light:focus,a.has-text-primary-light:hover{color:#b8fff4!important}.has-background-primary-light{background-color:#ebfffc!important}.has-text-primary-dark{color:#00947e!important}a.has-text-primary-dark:focus,a.has-text-primary-dark:hover{color:#00c7a9!important}.has-background-primary-dark{background-color:#00947e!important}.has-text-link{color:#3273dc!important}a.has-text-link:focus,a.has-text-link:hover{color:#205bbc!important}.has-background-link{background-color:#3273dc!important}.has-text-link-light{color:#eef3
fc!important}a.has-text-link-light:focus,a.has-text-link-light:hover{color:#c2d5f5!important}.has-background-link-light{background-color:#eef3fc!important}.has-text-link-dark{color:#2160c4!important}a.has-text-link-dark:focus,a.has-text-link-dark:hover{color:#3b79de!important}.has-background-link-dark{background-color:#2160c4!important}.has-text-info{color:#3298dc!important}a.has-text-info:focus,a.has-text-info:hover{color:#207dbc!important}.has-background-info{background-color:#3298dc!important}.has-text-info-light{color:#eef6fc!important}a.has-text-info-light:focus,a.has-text-info-light:hover{color:#c2e0f5!important}.has-background-info-light{background-color:#eef6fc!important}.has-text-info-dark{color:#1d72aa!important}a.has-text-info-dark:focus,a.has-text-info-dark:hover{color:#248fd6!important}.has-background-info-dark{background-color:#1d72aa!important}.has-text-success{color:#48c774!important}a.has-text-success:focus,a.has-text-success:hover{color:#34a85c!important}.has-background-success{background-color:#48c774!important}.has-text-success-light{color:#effaf3!important}a.has-text-success-light:focus,a.has-text-success-light:hover{color:#c8eed6!important}.has-background-success-light{background-color:#effaf3!important}.has-text-success-dark{color:#257942!important}a.has-text-success-dark:focus,a.has-text-success-dark:hover{color:#31a058!important}.has-background-success-dark{background-color:#257942!important}.has-text-warning{color:#ffdd57!important}a.has-text-warning:focus,a.has-text-warning:hover{color:#ffd324!important}.has-background-warning{background-color:#ffdd57!important}.has-text-warning-light{color:#fffbeb!important}a.has-text-warning-light:focus,a.has-text-warning-light:hover{color:#fff1b8!important}.has-background-warning-light{background-color:#fffbeb!important}.has-text-warning-dark{color:#947600!important}a.has-text-warning-dark:focus,a.has-text-warning-dark:hover{color:#c79f00!important}.has-background-warning-dark{background-color:#947600!important}.has-text-danger{color:#f14668!important}a.has-text-danger:focus,a.has-text-danger:hover{color:#ee1742!important}.has-background-danger{background-color:#f14668!important}.has-text-danger-light{color:#feecf0!important}a.has-text-danger-light:focus,a.has-text-danger-light:hover{color:#fabdc9!important}.has-background-danger-light{background-color:#feecf0!important}.has-text-danger-dark{color:#cc0f35!important}a.has-text-danger-dark:focus,a.has-text-danger-dark:hover{color:#ee2049!important}.has-background-danger-dark{background-color:#cc0f35!important}.has-text-black-bis{color:#121212!important}.has-background-black-bis{background-color:#121212!important}.has-text-black-ter{color:#242424!important}.has-background-black-ter{background-color:#242424!important}.has-text-grey-darker{color:#363636!important}.has-background-grey-darker{background-color:#363636!important}.has-text-grey-dark{color:#4a4a4a!important}.has-background-grey-dark{background-color:#4a4a4a!important}.has-text-grey{color:#7a7a7a!important}.has-background-grey{background-color:#7a7a7a!important}.has-text-grey-light{color:#b5b5b5!important}.has-background-grey-light{background-color:#b5b5b5!important}.has-text-grey-lighter{color:#dbdbdb!important}.has-background-grey-lighter{background-color:#dbdbdb!important}.has-text-white-ter{color:#f5f5f5!important}.has-background-white-ter{background-color:#f5f5f5!important}.has-text-white-bis{color:#fafafa!important}.has-background-white-bis{background-color:#fafafa!important}.is-flex-direction-row{flex-direction:row!
important}.is-flex-direction-row-reverse{flex-direction:row-reverse!important}.is-flex-direction-column{flex-direction:column!important}.is-flex-direction-column-reverse{flex-direction:column-reverse!important}.is-flex-wrap-nowrap{flex-wrap:nowrap!important}.is-flex-wrap-wrap{flex-wrap:wrap!important}.is-flex-wrap-wrap-reverse{flex-wrap:wrap-reverse!important}.is-justify-content-flex-start{justify-content:flex-start!important}.is-justify-content-flex-end{justify-content:flex-end!important}.is-justify-content-center{justify-content:center!important}.is-justify-content-space-between{justify-content:space-between!important}.is-justify-content-space-around{justify-content:space-around!important}.is-justify-content-space-evenly{justify-content:space-evenly!important}.is-justify-content-start{justify-content:start!important}.is-justify-content-end{justify-content:end!important}.is-justify-content-left{justify-content:left!important}.is-justify-content-right{justify-content:right!important}.is-align-content-flex-start{align-content:flex-start!important}.is-align-content-flex-end{align-content:flex-end!important}.is-align-content-center{align-content:center!important}.is-align-content-space-between{align-content:space-between!important}.is-align-content-space-around{align-content:space-around!important}.is-align-content-space-evenly{align-content:space-evenly!important}.is-align-content-stretch{align-content:stretch!important}.is-align-content-start{align-content:start!important}.is-align-content-end{align-content:end!important}.is-align-content-baseline{align-content:baseline!important}.is-align-items-stretch{align-items:stretch!important}.is-align-items-flex-start{align-items:flex-start!important}.is-align-items-flex-end{align-items:flex-end!important}.is-align-items-center{align-items:center!important}.is-align-items-baseline{align-items:baseline!important}.is-align-items-start{align-items:start!important}.is-align-items-end{align-items:end!important}.is-align-items-self-start{align-items:self-start!important}.is-align-items-self-end{align-items:self-end!important}.is-align-self-auto{align-self:auto!important}.is-align-self-flex-start{align-self:flex-start!important}.is-align-self-flex-end{align-self:flex-end!important}.is-align-self-center{align-self:center!important}.is-align-self-baseline{align-self:baseline!important}.is-align-self-stretch{align-self:stretch!important}.is-flex-grow-0{flex-grow:0!important}.is-flex-grow-1{flex-grow:1!important}.is-flex-grow-2{flex-grow:2!important}.is-flex-grow-3{flex-grow:3!important}.is-flex-grow-4{flex-grow:4!important}.is-flex-grow-5{flex-grow:5!important}.is-flex-shrink-0{flex-shrink:0!important}.is-flex-shrink-1{flex-shrink:1!important}.is-flex-shrink-2{flex-shrink:2!important}.is-flex-shrink-3{flex-shrink:3!important}.is-flex-shrink-4{flex-shrink:4!important}.is-flex-shrink-5{flex-shrink:5!important}.is-clearfix::after{clear:both;content:" 
";display:table}.is-pulled-left{float:left!important}.is-pulled-right{float:right!important}.is-radiusless{border-radius:0!important}.is-shadowless{box-shadow:none!important}.is-clickable{cursor:pointer!important}.is-clipped{overflow:hidden!important}.is-relative{position:relative!important}.is-marginless{margin:0!important}.is-paddingless{padding:0!important}.m-0{margin:0!important}.mt-0{margin-top:0!important}.mr-0{margin-right:0!important}.mb-0{margin-bottom:0!important}.ml-0{margin-left:0!important}.mx-0{margin-left:0!important;margin-right:0!important}.my-0{margin-top:0!important;margin-bottom:0!important}.m-1{margin:.25rem!important}.mt-1{margin-top:.25rem!important}.mr-1{margin-right:.25rem!important}.mb-1{margin-bottom:.25rem!important}.ml-1{margin-left:.25rem!important}.mx-1{margin-left:.25rem!important;margin-right:.25rem!important}.my-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.m-2{margin:.5rem!important}.mt-2{margin-top:.5rem!important}.mr-2{margin-right:.5rem!important}.mb-2{margin-bottom:.5rem!important}.ml-2{margin-left:.5rem!important}.mx-2{margin-left:.5rem!important;margin-right:.5rem!important}.my-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.m-3{margin:.75rem!important}.mt-3{margin-top:.75rem!important}.mr-3{margin-right:.75rem!important}.mb-3{margin-bottom:.75rem!important}.ml-3{margin-left:.75rem!important}.mx-3{margin-left:.75rem!important;margin-right:.75rem!important}.my-3{margin-top:.75rem!important;margin-bottom:.75rem!important}.m-4{margin:1rem!important}.mt-4{margin-top:1rem!important}.mr-4{margin-right:1rem!important}.mb-4{margin-bottom:1rem!important}.ml-4{margin-left:1rem!important}.mx-4{margin-left:1rem!important;margin-right:1rem!important}.my-4{margin-top:1rem!important;margin-bottom:1rem!important}.m-5{margin:1.5rem!important}.mt-5{margin-top:1.5rem!important}.mr-5{margin-right:1.5rem!important}.mb-5{margin-bottom:1.5rem!important}.ml-5{margin-left:1.5rem!important}.mx-5{margin-left:1.5rem!important;margin-right:1.5rem!important}.my-5{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.m-6{margin:3rem!important}.mt-6{margin-top:3rem!important}.mr-6{margin-right:3rem!important}.mb-6{margin-bottom:3rem!important}.ml-6{margin-left:3rem!important}.mx-6{margin-left:3rem!important;margin-right:3rem!important}.my-6{margin-top:3rem!important;margin-bottom:3rem!important}.p-0{padding:0!important}.pt-0{padding-top:0!important}.pr-0{padding-right:0!important}.pb-0{padding-bottom:0!important}.pl-0{padding-left:0!important}.px-0{padding-left:0!important;padding-right:0!important}.py-0{padding-top:0!important;padding-bottom:0!important}.p-1{padding:.25rem!important}.pt-1{padding-top:.25rem!important}.pr-1{padding-right:.25rem!important}.pb-1{padding-bottom:.25rem!important}.pl-1{padding-left:.25rem!important}.px-1{padding-left:.25rem!important;padding-right:.25rem!important}.py-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.p-2{padding:.5rem!important}.pt-2{padding-top:.5rem!important}.pr-2{padding-right:.5rem!important}.pb-2{padding-bottom:.5rem!important}.pl-2{padding-left:.5rem!important}.px-2{padding-left:.5rem!important;padding-right:.5rem!important}.py-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.p-3{padding:.75rem!important}.pt-3{padding-top:.75rem!important}.pr-3{padding-right:.75rem!important}.pb-3{padding-bottom:.75rem!important}.pl-3{padding-left:.75rem!important}.px-3{padding-left:.75rem!important;padding-right:.75rem!important}.py-3{padding-top:.75rem!important;padding-bottom:.75rem!
important}.p-4{padding:1rem!important}.pt-4{padding-top:1rem!important}.pr-4{padding-right:1rem!important}.pb-4{padding-bottom:1rem!important}.pl-4{padding-left:1rem!important}.px-4{padding-left:1rem!important;padding-right:1rem!important}.py-4{padding-top:1rem!important;padding-bottom:1rem!important}.p-5{padding:1.5rem!important}.pt-5{padding-top:1.5rem!important}.pr-5{padding-right:1.5rem!important}.pb-5{padding-bottom:1.5rem!important}.pl-5{padding-left:1.5rem!important}.px-5{padding-left:1.5rem!important;padding-right:1.5rem!important}.py-5{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.p-6{padding:3rem!important}.pt-6{padding-top:3rem!important}.pr-6{padding-right:3rem!important}.pb-6{padding-bottom:3rem!important}.pl-6{padding-left:3rem!important}.px-6{padding-left:3rem!important;padding-right:3rem!important}.py-6{padding-top:3rem!important;padding-bottom:3rem!important}.is-size-1{font-size:3rem!important}.is-size-2{font-size:2.5rem!important}.is-size-3{font-size:2rem!important}.is-size-4{font-size:1.5rem!important}.is-size-5{font-size:1.25rem!important}.is-size-6{font-size:1rem!important}.is-size-7{font-size:.75rem!important}@media screen and (max-width:768px){.is-size-1-mobile{font-size:3rem!important}.is-size-2-mobile{font-size:2.5rem!important}.is-size-3-mobile{font-size:2rem!important}.is-size-4-mobile{font-size:1.5rem!important}.is-size-5-mobile{font-size:1.25rem!important}.is-size-6-mobile{font-size:1rem!important}.is-size-7-mobile{font-size:.75rem!important}}@media screen and (min-width:769px),print{.is-size-1-tablet{font-size:3rem!important}.is-size-2-tablet{font-size:2.5rem!important}.is-size-3-tablet{font-size:2rem!important}.is-size-4-tablet{font-size:1.5rem!important}.is-size-5-tablet{font-size:1.25rem!important}.is-size-6-tablet{font-size:1rem!important}.is-size-7-tablet{font-size:.75rem!important}}@media screen and (max-width:1023px){.is-size-1-touch{font-size:3rem!important}.is-size-2-touch{font-size:2.5rem!important}.is-size-3-touch{font-size:2rem!important}.is-size-4-touch{font-size:1.5rem!important}.is-size-5-touch{font-size:1.25rem!important}.is-size-6-touch{font-size:1rem!important}.is-size-7-touch{font-size:.75rem!important}}@media screen and (min-width:1024px){.is-size-1-desktop{font-size:3rem!important}.is-size-2-desktop{font-size:2.5rem!important}.is-size-3-desktop{font-size:2rem!important}.is-size-4-desktop{font-size:1.5rem!important}.is-size-5-desktop{font-size:1.25rem!important}.is-size-6-desktop{font-size:1rem!important}.is-size-7-desktop{font-size:.75rem!important}}@media screen and (min-width:1216px){.is-size-1-widescreen{font-size:3rem!important}.is-size-2-widescreen{font-size:2.5rem!important}.is-size-3-widescreen{font-size:2rem!important}.is-size-4-widescreen{font-size:1.5rem!important}.is-size-5-widescreen{font-size:1.25rem!important}.is-size-6-widescreen{font-size:1rem!important}.is-size-7-widescreen{font-size:.75rem!important}}@media screen and (min-width:1408px){.is-size-1-fullhd{font-size:3rem!important}.is-size-2-fullhd{font-size:2.5rem!important}.is-size-3-fullhd{font-size:2rem!important}.is-size-4-fullhd{font-size:1.5rem!important}.is-size-5-fullhd{font-size:1.25rem!important}.is-size-6-fullhd{font-size:1rem!important}.is-size-7-fullhd{font-size:.75rem!important}}.has-text-centered{text-align:center!important}.has-text-justified{text-align:justify!important}.has-text-left{text-align:left!important}.has-text-right{text-align:right!important}@media screen and 
(max-width:768px){.has-text-centered-mobile{text-align:center!important}}@media screen and (min-width:769px),print{.has-text-centered-tablet{text-align:center!important}}@media screen and (min-width:769px) and (max-width:1023px){.has-text-centered-tablet-only{text-align:center!important}}@media screen and (max-width:1023px){.has-text-centered-touch{text-align:center!important}}@media screen and (min-width:1024px){.has-text-centered-desktop{text-align:center!important}}@media screen and (min-width:1024px) and (max-width:1215px){.has-text-centered-desktop-only{text-align:center!important}}@media screen and (min-width:1216px){.has-text-centered-widescreen{text-align:center!important}}@media screen and (min-width:1216px) and (max-width:1407px){.has-text-centered-widescreen-only{text-align:center!important}}@media screen and (min-width:1408px){.has-text-centered-fullhd{text-align:center!important}}@media screen and (max-width:768px){.has-text-justified-mobile{text-align:justify!important}}@media screen and (min-width:769px),print{.has-text-justified-tablet{text-align:justify!important}}@media screen and (min-width:769px) and (max-width:1023px){.has-text-justified-tablet-only{text-align:justify!important}}@media screen and (max-width:1023px){.has-text-justified-touch{text-align:justify!important}}@media screen and (min-width:1024px){.has-text-justified-desktop{text-align:justify!important}}@media screen and (min-width:1024px) and (max-width:1215px){.has-text-justified-desktop-only{text-align:justify!important}}@media screen and (min-width:1216px){.has-text-justified-widescreen{text-align:justify!important}}@media screen and (min-width:1216px) and (max-width:1407px){.has-text-justified-widescreen-only{text-align:justify!important}}@media screen and (min-width:1408px){.has-text-justified-fullhd{text-align:justify!important}}@media screen and (max-width:768px){.has-text-left-mobile{text-align:left!important}}@media screen and (min-width:769px),print{.has-text-left-tablet{text-align:left!important}}@media screen and (min-width:769px) and (max-width:1023px){.has-text-left-tablet-only{text-align:left!important}}@media screen and (max-width:1023px){.has-text-left-touch{text-align:left!important}}@media screen and (min-width:1024px){.has-text-left-desktop{text-align:left!important}}@media screen and (min-width:1024px) and (max-width:1215px){.has-text-left-desktop-only{text-align:left!important}}@media screen and (min-width:1216px){.has-text-left-widescreen{text-align:left!important}}@media screen and (min-width:1216px) and (max-width:1407px){.has-text-left-widescreen-only{text-align:left!important}}@media screen and (min-width:1408px){.has-text-left-fullhd{text-align:left!important}}@media screen and (max-width:768px){.has-text-right-mobile{text-align:right!important}}@media screen and (min-width:769px),print{.has-text-right-tablet{text-align:right!important}}@media screen and (min-width:769px) and (max-width:1023px){.has-text-right-tablet-only{text-align:right!important}}@media screen and (max-width:1023px){.has-text-right-touch{text-align:right!important}}@media screen and (min-width:1024px){.has-text-right-desktop{text-align:right!important}}@media screen and (min-width:1024px) and (max-width:1215px){.has-text-right-desktop-only{text-align:right!important}}@media screen and (min-width:1216px){.has-text-right-widescreen{text-align:right!important}}@media screen and (min-width:1216px) and (max-width:1407px){.has-text-right-widescreen-only{text-align:right!important}}@media screen and 
(min-width:1408px){.has-text-right-fullhd{text-align:right!important}}.is-capitalized{text-transform:capitalize!important}.is-lowercase{text-transform:lowercase!important}.is-uppercase{text-transform:uppercase!important}.is-italic{font-style:italic!important}.has-text-weight-light{font-weight:300!important}.has-text-weight-normal{font-weight:400!important}.has-text-weight-medium{font-weight:500!important}.has-text-weight-semibold{font-weight:600!important}.has-text-weight-bold{font-weight:700!important}.is-family-primary{font-family:BlinkMacSystemFont,-apple-system,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,"Fira Sans","Droid Sans","Helvetica Neue",Helvetica,Arial,sans-serif!important}.is-family-secondary{font-family:BlinkMacSystemFont,-apple-system,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,"Fira Sans","Droid Sans","Helvetica Neue",Helvetica,Arial,sans-serif!important}.is-family-sans-serif{font-family:BlinkMacSystemFont,-apple-system,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,"Fira Sans","Droid Sans","Helvetica Neue",Helvetica,Arial,sans-serif!important}.is-family-monospace{font-family:monospace!important}.is-family-code{font-family:monospace!important}.is-block{display:block!important}@media screen and (max-width:768px){.is-block-mobile{display:block!important}}@media screen and (min-width:769px),print{.is-block-tablet{display:block!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-block-tablet-only{display:block!important}}@media screen and (max-width:1023px){.is-block-touch{display:block!important}}@media screen and (min-width:1024px){.is-block-desktop{display:block!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-block-desktop-only{display:block!important}}@media screen and (min-width:1216px){.is-block-widescreen{display:block!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-block-widescreen-only{display:block!important}}@media screen and (min-width:1408px){.is-block-fullhd{display:block!important}}.is-flex{display:flex!important}@media screen and (max-width:768px){.is-flex-mobile{display:flex!important}}@media screen and (min-width:769px),print{.is-flex-tablet{display:flex!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-flex-tablet-only{display:flex!important}}@media screen and (max-width:1023px){.is-flex-touch{display:flex!important}}@media screen and (min-width:1024px){.is-flex-desktop{display:flex!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-flex-desktop-only{display:flex!important}}@media screen and (min-width:1216px){.is-flex-widescreen{display:flex!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-flex-widescreen-only{display:flex!important}}@media screen and (min-width:1408px){.is-flex-fullhd{display:flex!important}}.is-inline{display:inline!important}@media screen and (max-width:768px){.is-inline-mobile{display:inline!important}}@media screen and (min-width:769px),print{.is-inline-tablet{display:inline!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-inline-tablet-only{display:inline!important}}@media screen and (max-width:1023px){.is-inline-touch{display:inline!important}}@media screen and (min-width:1024px){.is-inline-desktop{display:inline!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-inline-desktop-only{display:inline!important}}@media screen and (min-width:1216px){.is-inline-widescreen{display:inline!important}}@media screen and (min-width:1216px) and 
(max-width:1407px){.is-inline-widescreen-only{display:inline!important}}@media screen and (min-width:1408px){.is-inline-fullhd{display:inline!important}}.is-inline-block{display:inline-block!important}@media screen and (max-width:768px){.is-inline-block-mobile{display:inline-block!important}}@media screen and (min-width:769px),print{.is-inline-block-tablet{display:inline-block!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-inline-block-tablet-only{display:inline-block!important}}@media screen and (max-width:1023px){.is-inline-block-touch{display:inline-block!important}}@media screen and (min-width:1024px){.is-inline-block-desktop{display:inline-block!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-inline-block-desktop-only{display:inline-block!important}}@media screen and (min-width:1216px){.is-inline-block-widescreen{display:inline-block!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-inline-block-widescreen-only{display:inline-block!important}}@media screen and (min-width:1408px){.is-inline-block-fullhd{display:inline-block!important}}.is-inline-flex{display:inline-flex!important}@media screen and (max-width:768px){.is-inline-flex-mobile{display:inline-flex!important}}@media screen and (min-width:769px),print{.is-inline-flex-tablet{display:inline-flex!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-inline-flex-tablet-only{display:inline-flex!important}}@media screen and (max-width:1023px){.is-inline-flex-touch{display:inline-flex!important}}@media screen and (min-width:1024px){.is-inline-flex-desktop{display:inline-flex!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-inline-flex-desktop-only{display:inline-flex!important}}@media screen and (min-width:1216px){.is-inline-flex-widescreen{display:inline-flex!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-inline-flex-widescreen-only{display:inline-flex!important}}@media screen and (min-width:1408px){.is-inline-flex-fullhd{display:inline-flex!important}}.is-hidden{display:none!important}.is-sr-only{border:none!important;clip:rect(0,0,0,0)!important;height:.01em!important;overflow:hidden!important;padding:0!important;position:absolute!important;white-space:nowrap!important;width:.01em!important}@media screen and (max-width:768px){.is-hidden-mobile{display:none!important}}@media screen and (min-width:769px),print{.is-hidden-tablet{display:none!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-hidden-tablet-only{display:none!important}}@media screen and (max-width:1023px){.is-hidden-touch{display:none!important}}@media screen and (min-width:1024px){.is-hidden-desktop{display:none!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-hidden-desktop-only{display:none!important}}@media screen and (min-width:1216px){.is-hidden-widescreen{display:none!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-hidden-widescreen-only{display:none!important}}@media screen and (min-width:1408px){.is-hidden-fullhd{display:none!important}}.is-invisible{visibility:hidden!important}@media screen and (max-width:768px){.is-invisible-mobile{visibility:hidden!important}}@media screen and (min-width:769px),print{.is-invisible-tablet{visibility:hidden!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-invisible-tablet-only{visibility:hidden!important}}@media screen and 
(max-width:1023px){.is-invisible-touch{visibility:hidden!important}}@media screen and (min-width:1024px){.is-invisible-desktop{visibility:hidden!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-invisible-desktop-only{visibility:hidden!important}}@media screen and (min-width:1216px){.is-invisible-widescreen{visibility:hidden!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-invisible-widescreen-only{visibility:hidden!important}}@media screen and (min-width:1408px){.is-invisible-fullhd{visibility:hidden!important}}.hero{align-items:stretch;display:flex;flex-direction:column;justify-content:space-between}.hero .navbar{background:0 0}.hero .tabs ul{border-bottom:none}.hero.is-white{background-color:#fff;color:#0a0a0a}.hero.is-white a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-white strong{color:inherit}.hero.is-white .title{color:#0a0a0a}.hero.is-white .subtitle{color:rgba(10,10,10,.9)}.hero.is-white .subtitle a:not(.button),.hero.is-white .subtitle strong{color:#0a0a0a}@media screen and (max-width:1023px){.hero.is-white .navbar-menu{background-color:#fff}}.hero.is-white .navbar-item,.hero.is-white .navbar-link{color:rgba(10,10,10,.7)}.hero.is-white .navbar-link.is-active,.hero.is-white .navbar-link:hover,.hero.is-white a.navbar-item.is-active,.hero.is-white a.navbar-item:hover{background-color:#f2f2f2;color:#0a0a0a}.hero.is-white .tabs a{color:#0a0a0a;opacity:.9}.hero.is-white .tabs a:hover{opacity:1}.hero.is-white .tabs li.is-active a{opacity:1}.hero.is-white .tabs.is-boxed a,.hero.is-white .tabs.is-toggle a{color:#0a0a0a}.hero.is-white .tabs.is-boxed a:hover,.hero.is-white .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-white .tabs.is-boxed li.is-active a,.hero.is-white .tabs.is-boxed li.is-active a:hover,.hero.is-white .tabs.is-toggle li.is-active a,.hero.is-white .tabs.is-toggle li.is-active a:hover{background-color:#0a0a0a;border-color:#0a0a0a;color:#fff}.hero.is-white.is-bold{background-image:linear-gradient(141deg,#e6e6e6 0,#fff 71%,#fff 100%)}@media screen and (max-width:768px){.hero.is-white.is-bold .navbar-menu{background-image:linear-gradient(141deg,#e6e6e6 0,#fff 71%,#fff 100%)}}.hero.is-black{background-color:#0a0a0a;color:#fff}.hero.is-black a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-black strong{color:inherit}.hero.is-black .title{color:#fff}.hero.is-black .subtitle{color:rgba(255,255,255,.9)}.hero.is-black .subtitle a:not(.button),.hero.is-black .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-black .navbar-menu{background-color:#0a0a0a}}.hero.is-black .navbar-item,.hero.is-black .navbar-link{color:rgba(255,255,255,.7)}.hero.is-black .navbar-link.is-active,.hero.is-black .navbar-link:hover,.hero.is-black a.navbar-item.is-active,.hero.is-black a.navbar-item:hover{background-color:#000;color:#fff}.hero.is-black .tabs a{color:#fff;opacity:.9}.hero.is-black .tabs a:hover{opacity:1}.hero.is-black .tabs li.is-active a{opacity:1}.hero.is-black .tabs.is-boxed a,.hero.is-black .tabs.is-toggle a{color:#fff}.hero.is-black .tabs.is-boxed a:hover,.hero.is-black .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-black .tabs.is-boxed li.is-active a,.hero.is-black .tabs.is-boxed li.is-active a:hover,.hero.is-black .tabs.is-toggle li.is-active a,.hero.is-black .tabs.is-toggle li.is-active 
a:hover{background-color:#fff;border-color:#fff;color:#0a0a0a}.hero.is-black.is-bold{background-image:linear-gradient(141deg,#000 0,#0a0a0a 71%,#181616 100%)}@media screen and (max-width:768px){.hero.is-black.is-bold .navbar-menu{background-image:linear-gradient(141deg,#000 0,#0a0a0a 71%,#181616 100%)}}.hero.is-light{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.hero.is-light a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-light strong{color:inherit}.hero.is-light .title{color:rgba(0,0,0,.7)}.hero.is-light .subtitle{color:rgba(0,0,0,.9)}.hero.is-light .subtitle a:not(.button),.hero.is-light .subtitle strong{color:rgba(0,0,0,.7)}@media screen and (max-width:1023px){.hero.is-light .navbar-menu{background-color:#f5f5f5}}.hero.is-light .navbar-item,.hero.is-light .navbar-link{color:rgba(0,0,0,.7)}.hero.is-light .navbar-link.is-active,.hero.is-light .navbar-link:hover,.hero.is-light a.navbar-item.is-active,.hero.is-light a.navbar-item:hover{background-color:#e8e8e8;color:rgba(0,0,0,.7)}.hero.is-light .tabs a{color:rgba(0,0,0,.7);opacity:.9}.hero.is-light .tabs a:hover{opacity:1}.hero.is-light .tabs li.is-active a{opacity:1}.hero.is-light .tabs.is-boxed a,.hero.is-light .tabs.is-toggle a{color:rgba(0,0,0,.7)}.hero.is-light .tabs.is-boxed a:hover,.hero.is-light .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-light .tabs.is-boxed li.is-active a,.hero.is-light .tabs.is-boxed li.is-active a:hover,.hero.is-light .tabs.is-toggle li.is-active a,.hero.is-light .tabs.is-toggle li.is-active a:hover{background-color:rgba(0,0,0,.7);border-color:rgba(0,0,0,.7);color:#f5f5f5}.hero.is-light.is-bold{background-image:linear-gradient(141deg,#dfd8d9 0,#f5f5f5 71%,#fff 100%)}@media screen and (max-width:768px){.hero.is-light.is-bold .navbar-menu{background-image:linear-gradient(141deg,#dfd8d9 0,#f5f5f5 71%,#fff 100%)}}.hero.is-dark{background-color:#363636;color:#fff}.hero.is-dark a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-dark strong{color:inherit}.hero.is-dark .title{color:#fff}.hero.is-dark .subtitle{color:rgba(255,255,255,.9)}.hero.is-dark .subtitle a:not(.button),.hero.is-dark .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-dark .navbar-menu{background-color:#363636}}.hero.is-dark .navbar-item,.hero.is-dark .navbar-link{color:rgba(255,255,255,.7)}.hero.is-dark .navbar-link.is-active,.hero.is-dark .navbar-link:hover,.hero.is-dark a.navbar-item.is-active,.hero.is-dark a.navbar-item:hover{background-color:#292929;color:#fff}.hero.is-dark .tabs a{color:#fff;opacity:.9}.hero.is-dark .tabs a:hover{opacity:1}.hero.is-dark .tabs li.is-active a{opacity:1}.hero.is-dark .tabs.is-boxed a,.hero.is-dark .tabs.is-toggle a{color:#fff}.hero.is-dark .tabs.is-boxed a:hover,.hero.is-dark .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-dark .tabs.is-boxed li.is-active a,.hero.is-dark .tabs.is-boxed li.is-active a:hover,.hero.is-dark .tabs.is-toggle li.is-active a,.hero.is-dark .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#363636}.hero.is-dark.is-bold{background-image:linear-gradient(141deg,#1f191a 0,#363636 71%,#46403f 100%)}@media screen and (max-width:768px){.hero.is-dark.is-bold .navbar-menu{background-image:linear-gradient(141deg,#1f191a 0,#363636 71%,#46403f 100%)}}.hero.is-primary{background-color:#00d1b2;color:#fff}.hero.is-primary a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-primary 
strong{color:inherit}.hero.is-primary .title{color:#fff}.hero.is-primary .subtitle{color:rgba(255,255,255,.9)}.hero.is-primary .subtitle a:not(.button),.hero.is-primary .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-primary .navbar-menu{background-color:#00d1b2}}.hero.is-primary .navbar-item,.hero.is-primary .navbar-link{color:rgba(255,255,255,.7)}.hero.is-primary .navbar-link.is-active,.hero.is-primary .navbar-link:hover,.hero.is-primary a.navbar-item.is-active,.hero.is-primary a.navbar-item:hover{background-color:#00b89c;color:#fff}.hero.is-primary .tabs a{color:#fff;opacity:.9}.hero.is-primary .tabs a:hover{opacity:1}.hero.is-primary .tabs li.is-active a{opacity:1}.hero.is-primary .tabs.is-boxed a,.hero.is-primary .tabs.is-toggle a{color:#fff}.hero.is-primary .tabs.is-boxed a:hover,.hero.is-primary .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-primary .tabs.is-boxed li.is-active a,.hero.is-primary .tabs.is-boxed li.is-active a:hover,.hero.is-primary .tabs.is-toggle li.is-active a,.hero.is-primary .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#00d1b2}.hero.is-primary.is-bold{background-image:linear-gradient(141deg,#009e6c 0,#00d1b2 71%,#00e7eb 100%)}@media screen and (max-width:768px){.hero.is-primary.is-bold .navbar-menu{background-image:linear-gradient(141deg,#009e6c 0,#00d1b2 71%,#00e7eb 100%)}}.hero.is-link{background-color:#3273dc;color:#fff}.hero.is-link a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-link strong{color:inherit}.hero.is-link .title{color:#fff}.hero.is-link .subtitle{color:rgba(255,255,255,.9)}.hero.is-link .subtitle a:not(.button),.hero.is-link .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-link .navbar-menu{background-color:#3273dc}}.hero.is-link .navbar-item,.hero.is-link .navbar-link{color:rgba(255,255,255,.7)}.hero.is-link .navbar-link.is-active,.hero.is-link .navbar-link:hover,.hero.is-link a.navbar-item.is-active,.hero.is-link a.navbar-item:hover{background-color:#2366d1;color:#fff}.hero.is-link .tabs a{color:#fff;opacity:.9}.hero.is-link .tabs a:hover{opacity:1}.hero.is-link .tabs li.is-active a{opacity:1}.hero.is-link .tabs.is-boxed a,.hero.is-link .tabs.is-toggle a{color:#fff}.hero.is-link .tabs.is-boxed a:hover,.hero.is-link .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-link .tabs.is-boxed li.is-active a,.hero.is-link .tabs.is-boxed li.is-active a:hover,.hero.is-link .tabs.is-toggle li.is-active a,.hero.is-link .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#3273dc}.hero.is-link.is-bold{background-image:linear-gradient(141deg,#1577c6 0,#3273dc 71%,#4366e5 100%)}@media screen and (max-width:768px){.hero.is-link.is-bold .navbar-menu{background-image:linear-gradient(141deg,#1577c6 0,#3273dc 71%,#4366e5 100%)}}.hero.is-info{background-color:#3298dc;color:#fff}.hero.is-info a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-info strong{color:inherit}.hero.is-info .title{color:#fff}.hero.is-info .subtitle{color:rgba(255,255,255,.9)}.hero.is-info .subtitle a:not(.button),.hero.is-info .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-info .navbar-menu{background-color:#3298dc}}.hero.is-info .navbar-item,.hero.is-info .navbar-link{color:rgba(255,255,255,.7)}.hero.is-info .navbar-link.is-active,.hero.is-info .navbar-link:hover,.hero.is-info a.navbar-item.is-active,.hero.is-info 
a.navbar-item:hover{background-color:#238cd1;color:#fff}.hero.is-info .tabs a{color:#fff;opacity:.9}.hero.is-info .tabs a:hover{opacity:1}.hero.is-info .tabs li.is-active a{opacity:1}.hero.is-info .tabs.is-boxed a,.hero.is-info .tabs.is-toggle a{color:#fff}.hero.is-info .tabs.is-boxed a:hover,.hero.is-info .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-info .tabs.is-boxed li.is-active a,.hero.is-info .tabs.is-boxed li.is-active a:hover,.hero.is-info .tabs.is-toggle li.is-active a,.hero.is-info .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#3298dc}.hero.is-info.is-bold{background-image:linear-gradient(141deg,#159dc6 0,#3298dc 71%,#4389e5 100%)}@media screen and (max-width:768px){.hero.is-info.is-bold .navbar-menu{background-image:linear-gradient(141deg,#159dc6 0,#3298dc 71%,#4389e5 100%)}}.hero.is-success{background-color:#48c774;color:#fff}.hero.is-success a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-success strong{color:inherit}.hero.is-success .title{color:#fff}.hero.is-success .subtitle{color:rgba(255,255,255,.9)}.hero.is-success .subtitle a:not(.button),.hero.is-success .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-success .navbar-menu{background-color:#48c774}}.hero.is-success .navbar-item,.hero.is-success .navbar-link{color:rgba(255,255,255,.7)}.hero.is-success .navbar-link.is-active,.hero.is-success .navbar-link:hover,.hero.is-success a.navbar-item.is-active,.hero.is-success a.navbar-item:hover{background-color:#3abb67;color:#fff}.hero.is-success .tabs a{color:#fff;opacity:.9}.hero.is-success .tabs a:hover{opacity:1}.hero.is-success .tabs li.is-active a{opacity:1}.hero.is-success .tabs.is-boxed a,.hero.is-success .tabs.is-toggle a{color:#fff}.hero.is-success .tabs.is-boxed a:hover,.hero.is-success .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-success .tabs.is-boxed li.is-active a,.hero.is-success .tabs.is-boxed li.is-active a:hover,.hero.is-success .tabs.is-toggle li.is-active a,.hero.is-success .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#48c774}.hero.is-success.is-bold{background-image:linear-gradient(141deg,#29b342 0,#48c774 71%,#56d296 100%)}@media screen and (max-width:768px){.hero.is-success.is-bold .navbar-menu{background-image:linear-gradient(141deg,#29b342 0,#48c774 71%,#56d296 100%)}}.hero.is-warning{background-color:#ffdd57;color:rgba(0,0,0,.7)}.hero.is-warning a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-warning strong{color:inherit}.hero.is-warning .title{color:rgba(0,0,0,.7)}.hero.is-warning .subtitle{color:rgba(0,0,0,.9)}.hero.is-warning .subtitle a:not(.button),.hero.is-warning .subtitle strong{color:rgba(0,0,0,.7)}@media screen and (max-width:1023px){.hero.is-warning .navbar-menu{background-color:#ffdd57}}.hero.is-warning .navbar-item,.hero.is-warning .navbar-link{color:rgba(0,0,0,.7)}.hero.is-warning .navbar-link.is-active,.hero.is-warning .navbar-link:hover,.hero.is-warning a.navbar-item.is-active,.hero.is-warning a.navbar-item:hover{background-color:#ffd83d;color:rgba(0,0,0,.7)}.hero.is-warning .tabs a{color:rgba(0,0,0,.7);opacity:.9}.hero.is-warning .tabs a:hover{opacity:1}.hero.is-warning .tabs li.is-active a{opacity:1}.hero.is-warning .tabs.is-boxed a,.hero.is-warning .tabs.is-toggle a{color:rgba(0,0,0,.7)}.hero.is-warning .tabs.is-boxed a:hover,.hero.is-warning .tabs.is-toggle 
a:hover{background-color:rgba(10,10,10,.1)}.hero.is-warning .tabs.is-boxed li.is-active a,.hero.is-warning .tabs.is-boxed li.is-active a:hover,.hero.is-warning .tabs.is-toggle li.is-active a,.hero.is-warning .tabs.is-toggle li.is-active a:hover{background-color:rgba(0,0,0,.7);border-color:rgba(0,0,0,.7);color:#ffdd57}.hero.is-warning.is-bold{background-image:linear-gradient(141deg,#ffaf24 0,#ffdd57 71%,#fffa70 100%)}@media screen and (max-width:768px){.hero.is-warning.is-bold .navbar-menu{background-image:linear-gradient(141deg,#ffaf24 0,#ffdd57 71%,#fffa70 100%)}}.hero.is-danger{background-color:#f14668;color:#fff}.hero.is-danger a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-danger strong{color:inherit}.hero.is-danger .title{color:#fff}.hero.is-danger .subtitle{color:rgba(255,255,255,.9)}.hero.is-danger .subtitle a:not(.button),.hero.is-danger .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-danger .navbar-menu{background-color:#f14668}}.hero.is-danger .navbar-item,.hero.is-danger .navbar-link{color:rgba(255,255,255,.7)}.hero.is-danger .navbar-link.is-active,.hero.is-danger .navbar-link:hover,.hero.is-danger a.navbar-item.is-active,.hero.is-danger a.navbar-item:hover{background-color:#ef2e55;color:#fff}.hero.is-danger .tabs a{color:#fff;opacity:.9}.hero.is-danger .tabs a:hover{opacity:1}.hero.is-danger .tabs li.is-active a{opacity:1}.hero.is-danger .tabs.is-boxed a,.hero.is-danger .tabs.is-toggle a{color:#fff}.hero.is-danger .tabs.is-boxed a:hover,.hero.is-danger .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-danger .tabs.is-boxed li.is-active a,.hero.is-danger .tabs.is-boxed li.is-active a:hover,.hero.is-danger .tabs.is-toggle li.is-active a,.hero.is-danger .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#f14668}.hero.is-danger.is-bold{background-image:linear-gradient(141deg,#fa0a62 0,#f14668 71%,#f7595f 100%)}@media screen and (max-width:768px){.hero.is-danger.is-bold .navbar-menu{background-image:linear-gradient(141deg,#fa0a62 0,#f14668 71%,#f7595f 100%)}}.hero.is-small .hero-body{padding:1.5rem}@media screen and (min-width:769px),print{.hero.is-medium .hero-body{padding:9rem 1.5rem}}@media screen and (min-width:769px),print{.hero.is-large .hero-body{padding:18rem 1.5rem}}.hero.is-fullheight .hero-body,.hero.is-fullheight-with-navbar .hero-body,.hero.is-halfheight .hero-body{align-items:center;display:flex}.hero.is-fullheight .hero-body>.container,.hero.is-fullheight-with-navbar .hero-body>.container,.hero.is-halfheight .hero-body>.container{flex-grow:1;flex-shrink:1}.hero.is-halfheight{min-height:50vh}.hero.is-fullheight{min-height:100vh}.hero-video{overflow:hidden}.hero-video video{left:50%;min-height:100%;min-width:100%;position:absolute;top:50%;transform:translate3d(-50%,-50%,0)}.hero-video.is-transparent{opacity:.3}@media screen and (max-width:768px){.hero-video{display:none}}.hero-buttons{margin-top:1.5rem}@media screen and (max-width:768px){.hero-buttons .button{display:flex}.hero-buttons .button:not(:last-child){margin-bottom:.75rem}}@media screen and (min-width:769px),print{.hero-buttons{display:flex;justify-content:center}.hero-buttons .button:not(:last-child){margin-right:1.5rem}}.hero-foot,.hero-head{flex-grow:0;flex-shrink:0}.hero-body{flex-grow:1;flex-shrink:0;padding:3rem 1.5rem}.section{padding:3rem 1.5rem}@media screen and (min-width:1024px){.section.is-medium{padding:9rem 1.5rem}.section.is-large{padding:18rem 
1.5rem}}.footer{background-color:#fafafa;padding:3rem 1.5rem 6rem}
\ No newline at end of file
diff --git "a/spaces/f2api/gpt-academic/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" "b/spaces/f2api/gpt-academic/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py"
deleted file mode 100644
index a564f21d231cd65c29b539573929ca5d2df63203..0000000000000000000000000000000000000000
--- "a/spaces/f2api/gpt-academic/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py"
+++ /dev/null
@@ -1,54 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-fast_debug = False
-
-def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
-    import time, os
-    print('begin analysis on:', file_manifest)
-    for index, fp in enumerate(file_manifest):
-        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-            file_content = f.read()
-
-        i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
-        i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
-        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
-        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-        if not fast_debug:
-            msg = '正常'
-            # ** gpt request **
-            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-                i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)  # with a timeout countdown
-
-            chatbot[-1] = (i_say_show_user, gpt_say)
-            history.append(i_say_show_user); history.append(gpt_say)
-            yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
-            if not fast_debug: time.sleep(2)
-
-    if not fast_debug:
-        res = write_results_to_file(history)
-        chatbot.append(("完成了吗?", res))
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
-
-
-
-@CatchException
-def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    history = []  # clear the history to avoid input overflow
-    import glob, os
-    if os.path.exists(txt):
-        project_folder = txt
-    else:
-        if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-        return
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
-                    [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]
-
-    if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.py或.cpp文件: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-        return
-    yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
diff --git a/spaces/facebook/ov-seg/open_vocab_seg/modeling/criterion.py b/spaces/facebook/ov-seg/open_vocab_seg/modeling/criterion.py
deleted file mode 100644
index f4d5b71242f87c6f67463f9c31f873a742f3e5c7..0000000000000000000000000000000000000000
--- a/spaces/facebook/ov-seg/open_vocab_seg/modeling/criterion.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py
All Rights Reserved - -""" -MaskFormer criterion. -""" -import torch -import torch.nn.functional as F -from torch import nn - -from detectron2.utils.comm import get_world_size - -from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list - - -def dice_loss(inputs, targets, num_masks): - """ - Compute the DICE loss, similar to generalized IOU for masks - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - """ - inputs = inputs.sigmoid() - inputs = inputs.flatten(1) - numerator = 2 * (inputs * targets).sum(-1) - denominator = inputs.sum(-1) + targets.sum(-1) - loss = 1 - (numerator + 1) / (denominator + 1) - return loss.sum() / num_masks - - -def sigmoid_focal_loss( - inputs, targets, num_masks, alpha: float = 0.25, gamma: float = 2 -): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - return loss.mean(1).sum() / num_masks - - -class SetCriterion(nn.Module): - """This class computes the loss for DETR. - The process happens in two steps: - 1) we compute hungarian assignment between ground truth boxes and the outputs of the model - 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) - """ - - def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses): - """Create the criterion. - Parameters: - num_classes: number of object categories, omitting the special no-object category - matcher: module able to compute a matching between targets and proposals - weight_dict: dict containing as key the names of the losses and as values their relative weight. - eos_coef: relative classification weight applied to the no-object category - losses: list of all the losses to be applied. See get_loss for list of available losses. 
- """ - super().__init__() - self.num_classes = num_classes - self.matcher = matcher - self.weight_dict = weight_dict - self.eos_coef = eos_coef - self.losses = losses - if eos_coef > 0: - - empty_weight = torch.ones(self.num_classes + 1) - - empty_weight[-1] = self.eos_coef - self.register_buffer("empty_weight", empty_weight) - self.use_ignore_idx = False - else: - self.use_ignore_idx = True - self.cur_target = [] - - def loss_labels(self, outputs, targets, indices, num_masks): - """Classification loss (NLL) - targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] - """ - assert "pred_logits" in outputs - src_logits = outputs["pred_logits"] - - idx = self._get_src_permutation_idx(indices) - target_classes_o = torch.cat( - [t["labels"][J] for t, (_, J) in zip(targets, indices)] - ) - target_classes = torch.full( - src_logits.shape[:2], - self.num_classes, - dtype=torch.int64, - device=src_logits.device, - ) - target_classes[idx] = target_classes_o - if self.use_ignore_idx: - loss_ce = F.cross_entropy( - src_logits.transpose(1, 2), - target_classes, - ignore_index=self.num_classes, - ) - else: - if "empty_weight" in outputs: - empty_weight = torch.cat( - [outputs["empty_weight"], self.empty_weight[-1:]] - ).detach() - else: - empty_weight = self.empty_weight - loss_ce = F.cross_entropy( - src_logits.transpose(1, 2), target_classes, empty_weight - ) - losses = {"loss_ce": loss_ce} - return losses - - def loss_masks(self, outputs, targets, indices, num_masks): - """Compute the losses related to the masks: the focal loss and the dice loss. - targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] - """ - assert "pred_masks" in outputs - - src_idx = self._get_src_permutation_idx(indices) - tgt_idx = self._get_tgt_permutation_idx(indices) - src_masks = outputs["pred_masks"] - src_masks = src_masks[src_idx] - masks = [t["masks"] for t in targets] - # TODO use valid to mask invalid areas due to padding in loss - target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() - target_masks = target_masks.to(src_masks) - target_masks = target_masks[tgt_idx] - - # upsample predictions to the target size - src_masks = F.interpolate( - src_masks[:, None], - size=target_masks.shape[-2:], - mode="bilinear", - align_corners=False, - ) - src_masks = src_masks[:, 0].flatten(1) - - target_masks = target_masks.flatten(1) - target_masks = target_masks.view(src_masks.shape) - losses = { - "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_masks), - "loss_dice": dice_loss(src_masks, target_masks, num_masks), - } - return losses - - def _get_src_permutation_idx(self, indices): - # permute predictions following indices - batch_idx = torch.cat( - [torch.full_like(src, i) for i, (src, _) in enumerate(indices)] - ) - src_idx = torch.cat([src for (src, _) in indices]) - return batch_idx, src_idx - - def _get_tgt_permutation_idx(self, indices): - # permute targets following indices - batch_idx = torch.cat( - [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)] - ) - tgt_idx = torch.cat([tgt for (_, tgt) in indices]) - return batch_idx, tgt_idx - - def get_loss(self, loss, outputs, targets, indices, num_masks): - loss_map = {"labels": self.loss_labels, "masks": self.loss_masks} - assert loss in loss_map, f"do you really want to compute {loss} loss?" - return loss_map[loss](outputs, targets, indices, num_masks) - - def forward(self, outputs, targets): - """This performs the loss computation. 
- Parameters: - outputs: dict of tensors, see the output specification of the model for the format - targets: list of dicts, such that len(targets) == batch_size. - The expected keys in each dict depends on the losses applied, see each loss' doc - """ - outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"} - - # Retrieve the matching between the outputs of the last layer and the targets - indices = self.matcher(outputs_without_aux, targets) - - # Compute the average number of target boxes accross all nodes, for normalization purposes - num_masks = sum(len(t["labels"]) for t in targets) - num_masks = torch.as_tensor( - [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device - ) - if is_dist_avail_and_initialized(): - torch.distributed.all_reduce(num_masks) - num_masks = torch.clamp(num_masks / get_world_size(), min=1).item() - - # Compute all the requested losses - losses = {} - for loss in self.losses: - losses.update(self.get_loss(loss, outputs, targets, indices, num_masks)) - - # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. - if "aux_outputs" in outputs: - for i, aux_outputs in enumerate(outputs["aux_outputs"]): - indices = self.matcher(aux_outputs, targets) - for loss in self.losses: - l_dict = self.get_loss( - loss, aux_outputs, targets, indices, num_masks - ) - l_dict = {k + f"_{i}": v for k, v in l_dict.items()} - losses.update(l_dict) - - return losses - - def clean_buffer(self): - self.cur_target = [] diff --git a/spaces/falterWliame/Face_Mask_Detection/Autodesk 3ds Max 2009 64 Bit Xforce Keygen.md b/spaces/falterWliame/Face_Mask_Detection/Autodesk 3ds Max 2009 64 Bit Xforce Keygen.md deleted file mode 100644 index de5e6e1f4b71a9483133fc5871e1e000b6221d24..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Autodesk 3ds Max 2009 64 Bit Xforce Keygen.md +++ /dev/null @@ -1,6 +0,0 @@ -

autodesk 3ds max 2009 64 bit xforce keygen


Download File ✔✔✔ https://urlca.com/2uDdRR



- -Autodesk 3ds Max Design 2009 32-bit 11.5 Download ... Bit And 64 Bit.... 64bit version activation, Keygen, Patch xforce keygen autocad 2009 . 4d29de3e1b
-
-
-

diff --git a/spaces/fangyuan/lfqa_discourse/app.py b/spaces/fangyuan/lfqa_discourse/app.py deleted file mode 100644 index b3b23a348b8dca5b9d9683d9c394b9dc88692086..0000000000000000000000000000000000000000 --- a/spaces/fangyuan/lfqa_discourse/app.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -import gradio as gr -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer -import stanza -import re -stanza.download('en', processors='tokenize') - -model = AutoModelForSeq2SeqLM.from_pretrained("fangyuan/lfqa_role_classification") -tokenizer = AutoTokenizer.from_pretrained("fangyuan/lfqa_role_classification") -en_nlp = stanza.Pipeline('en', processors='tokenize') - - -article=''' -## About -This is a demo for our paper: [How Do We Answer Complex Questions: Discourse Structure of Long-form Answers](https://aclanthology.org/2022.acl-long.249/). - -Fangyuan Xu, Junyi Jessy Li, Eunsol Choi. 2022. -## Model -The model served here is a T5(large)-based role classification model trained on functional roles of ELI5 answers. -## Resources -Please see more information (paper/code/data/datasheet) at our [website](https://www.cs.utexas.edu/~fxu/lfqa_discourse/index.html). -## Contact -[Fangyuan Xu](https://www.cs.utexas.edu/~fxu/) via firstname@utexas.edu -''' - -role_mappings = { - 'Answer': 'Answer', - 'Answer (Summary)': 'Summary', - 'Auxiliary Information': 'Auxiliary Information', - 'Answer - Example': 'Example', - 'Miscellaneous': 'Miscellaneous', - 'Answer - Organizational sentence': 'Organizational sentence', - ' ': ' ', -} - -def get_ans_sentence_with_stanza(answer_paragraph, pipeline, - is_offset=False): - '''sentence segmentation with stanza''' - answer_paragraph_processed = pipeline(answer_paragraph) - sentences = [] - for sent in answer_paragraph_processed.sentences: - if is_offset: - sentences.append((sent.tokens[0].start_char, sent.tokens[-1].end_char)) - else: - sentence = answer_paragraph[sent.tokens[0].start_char:sent.tokens[-1].end_char] - sentences.append(sentence.strip()) - return sentences - - -def create_input_to_t5(question, answer): - input_line = [question] - answer_paragraph = get_ans_sentence_with_stanza(answer, en_nlp) - for idx, answer_sent in enumerate(answer_paragraph): - sep_token = '[{}]'.format(idx+1) # shift by one - input_line.append(sep_token) - input_line.append(answer_sent) - return ' '.join(input_line) - -def process_t5_output(input_txt, output_txt): - pred_roles = [] - answer_sentence = re.split('\[\d+\] ', input_txt) - answer_sentence = answer_sentence[1:] - sentence_idx = re.findall('\[\d+\]', input_txt) - idx_to_sentence = zip(sentence_idx, answer_sentence) - pred_role = re.split('\[\d+\] ', output_txt)[1:] - pred_idx = re.findall('\[\d+\]', output_txt) - idx_to_role = { - idx: role.strip() for (idx, role) in zip(pred_idx, pred_role) - } - for _, (idx, sentence) in enumerate(idx_to_sentence): - pred_role = ' ' if idx not in idx_to_role else idx_to_role[idx] - mapped_pred_role = role_mappings[pred_role] - pred_roles.append('{} ({})'.format(sentence, mapped_pred_role)) - print(input_txt, output_txt) - return '\n'.join(pred_roles) - - - -def predict(question, answer): - input_txt = create_input_to_t5(question, answer) - input_ids = tokenizer(input_txt, return_tensors='pt').input_ids - outputs = model.generate(input_ids, max_length=512) - output_txt = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0] - return process_t5_output(input_txt, output_txt) - - - -gr.Interface( - fn=predict, - inputs=[ - gr.inputs.Textbox(lines=1, label="Question:"), - 
gr.inputs.Textbox(lines=1, label="Answer:"), - ], - outputs=[ - gr.outputs.Textbox(label="Predicted sentence-level functional roles"), - ], - theme="peach", - title="Discourse structure of long-form answer", - description="Input a question with its long-form answer to see the predicted discourse structure by our role classifier.", - article=article, - examples=[ - ['''If a sheep's wool never stops growing, how are they not extinct?''', - '''It's already answered that continuous wool growth has been selected by human breeders, but there's a misconception in your question that I'd like to address.Evolution doesn't select for what is best for *the individual*.Traits that help the individual don't necessarily survive.Only traits that ensure *procreation* survive.The quality of life is no concern to nature.Think of pain.There's absolutely no sense of us feeling excruciating pain.When you're dying, its about as much help to you as a sheep with meter long hair.Pain itself however is very useful during lifetime to avoid injury.An individual capable of feeling pain is much more likely to procreate than an individual which is not.That said, it is very unlikely for an expensive trait like growing massive amounts of wool to occur in wild sheep.However, given the right circumstances, it could well occur.Provided it doesn't hamper reproduction too much.'''], - ['''Why don't some planets in our solar system orbit the other way around the Sun?''', - '''Try to imagine the solar system before there was even really a star.There would have been a cloud of material flying all around.If everything was very random, then very little would have enough speed to avoid being sucked into the newly forming star. If some things had a velocity in one direction and other things had a velocity in another direction, then they would likely end up dragging on each other, slow down, and get pulled in. What we think happened is that as the sun was forming, it acquired a spin and that spin ended up transferring to the cloud of material, shaping it into a disc which eventually would collect together to form the planets. Without this spin, the material would have just fallen into the sun and it would have burned a little bit brighter.'''], - ['''Why are skyscraper windows still washed by hand?''', - '''I worked on a window-washing robot that cleaned acres of rooftops over a huge commercial greenhouse. Worked great, except when it didn't, and would either break down completely or just get lost and start climbing the wrong parts of the structure. Then repair techs and manual window washers still have to be employed. I think this ends up being a cost/benefit problem where the reliability of our robots and price of implementation isn't quite at the point where it makes this commercially viable for skyscrapers. For what it's worth, I think the Twin Towers actually used a washer robot on the upper floors to limited success.'''] - ] -).launch(enable_queue=True) - - diff --git a/spaces/fatiXbelha/sd/Aplikasi Mining Bitcoin Gratis Android yang Terbukti Membayar - Droidly.md b/spaces/fatiXbelha/sd/Aplikasi Mining Bitcoin Gratis Android yang Terbukti Membayar - Droidly.md deleted file mode 100644 index b48eee31a5fdf63b408c30ba0f8353c5ae9be4f1..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Aplikasi Mining Bitcoin Gratis Android yang Terbukti Membayar - Droidly.md +++ /dev/null @@ -1,115 +0,0 @@ - -

Apk Mining Bitcoin Gratis: How to Mine Bitcoin for Free on Android

-

Bitcoin is the most popular and valuable cryptocurrency in the world, and many people want to get their hands on some of it. But buying bitcoin can be expensive, risky, and complicated. That's why some people opt for mining bitcoin instead. Mining bitcoin is the process of using your computer or mobile device to solve complex mathematical problems and earn rewards in the form of new bitcoins. But how can you mine bitcoin for free on your Android phone? In this article, we will explain what bitcoin mining is, why you should do it, and how you can do it with some of the best apps for mining bitcoin on Android.

-




-

What is Bitcoin Mining and Why Do It?

-

Bitcoin Mining Explained

-

Bitcoin is a decentralized digital currency that operates on a peer-to-peer network of computers. Unlike traditional currencies, bitcoin is not controlled by any central authority or intermediary. Instead, bitcoin transactions are verified and recorded by a distributed ledger called the blockchain. The blockchain is a public record of all bitcoin transactions that ever happened, and it is constantly updated by the network of computers that participate in the bitcoin protocol.

-

But how are new bitcoins created and distributed? This is where mining comes in. Mining is the process of adding new blocks of transactions to the blockchain. Each block contains a cryptographic puzzle that must be solved by the miners. The first miner who solves the puzzle gets to add the block to the blockchain and receive a reward in the form of newly minted bitcoins and transaction fees. The difficulty of the puzzle adjusts every 2016 blocks (about every two weeks) to ensure that one block is added every 10 minutes on average.
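To make the mining "puzzle" concrete, here is a minimal, illustrative Python sketch of the proof-of-work idea: keep trying nonces until the double SHA-256 hash of the block data falls below a target. This is a toy version of what real miners do on specialized hardware, not code from any of the apps discussed below, and the difficulty value is an assumption chosen so the demo finishes quickly.

```python
import hashlib

def mine(block_header: str, difficulty_bits: int):
    """Try nonces until sha256(sha256(header+nonce)) is below the target,
    i.e. the hash starts with `difficulty_bits` zero bits."""
    target = 2 ** (256 - difficulty_bits)
    nonce = 0
    while True:
        payload = f"{block_header}{nonce}".encode()
        digest = hashlib.sha256(hashlib.sha256(payload).digest()).hexdigest()
        if int(digest, 16) < target:
            return nonce, digest  # puzzle solved: this nonce "wins" the block
        nonce += 1

nonce, digest = mine("previous-hash + transactions", difficulty_bits=16)
print(f"found nonce {nonce}: {digest}")
```

Bitcoin's real difficulty is vastly higher than this toy setting, and as noted above the network retunes it every 2016 blocks so that solving keeps taking about 10 minutes.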

-

Benefits of Bitcoin Mining

-

Mining bitcoin has several benefits, both for yourself and for the bitcoin network. Here are some of them:

-


-
    -
  • You can earn free bitcoins without having to buy them with fiat money or exchange them with other cryptocurrencies.
  • -
  • You can contribute to the security and decentralization of the bitcoin network by verifying and validating transactions.
  • -
  • You can learn more about how bitcoin works and gain technical skills and knowledge.
  • -
  • You can have fun and challenge yourself by competing with other miners around the world.
  • -
-

How to Mine Bitcoin for Free on Android

-

Requirements for Mining Bitcoin on Android

-

Before you start mining bitcoin on your Android phone, you need to have some basic requirements. These include:

-
    -
  • An Android phone with enough battery life, storage space, and processing power, plus a stable internet connection.
  • -
  • A bitcoin wallet app that allows you to send and receive bitcoins securely.
  • -
  • A mining app that connects you to a mining pool or a solo mining option.
  • -
  • A mining pool account that lets you join other miners and share rewards.
  • -
-

Best Apps for Mining Bitcoin on Android

-

There are many apps that claim to let you mine bitcoin for free on your Android phone, but not all of them are reliable, safe, or profitable. Some of them may contain malware, viruses, or hidden fees that can harm your device or steal your bitcoins. To avoid these risks, you should only use reputable and trusted apps that have positive reviews and ratings from other users. Here are some of the best apps for mining bitcoin on Android that we recommend:

-

CryptoTab Browser

CryptoTab Browser is a web browser with a built-in mining feature. It claims to mine bitcoin in the background while you browse, using your device's spare processing power. You can adjust the mining speed, withdraw your earnings to your bitcoin wallet, and increase your income by inviting friends through its referral program.

-

TurboMiner

-

TurboMiner is a mining app that lets you mine bitcoin and other cryptocurrencies on your Android phone. It supports multiple mining algorithms (such as SHA-256, Scrypt, and X11) and multiple coins (such as Bitcoin, Ethereum, and Litecoin). You can choose to mine solo or join a mining pool and get paid in bitcoins or other coins. TurboMiner also has a built-in wallet and exchange that allows you to manage your crypto assets easily.

-

Droid Miner

-

Droid Miner is another mining app that allows you to mine bitcoin and other cryptocurrencies on your Android phone. It likewise supports multiple mining algorithms (such as SHA-256, Scrypt, and X11) and multiple coins (such as Ethereum and Litecoin). You can customize your mining settings, such as CPU usage, threads, frequency, and difficulty. You can also monitor your mining statistics, such as hash rate, shares, earnings, and temperature.

-

NeonNeon Miner

-

NeonNeon Miner is a mining app that uses the NeoScrypt algorithm on your Android phone. Note that bitcoin itself is mined with SHA-256, so NeoScrypt mining actually targets other coins, with some pools converting the payout to bitcoin. It is compatible with most devices that have ARM processors and Android 4.1 or higher. You can mine solo or join a mining pool and get paid in bitcoins or other coins. NeonNeon Miner also has a user-friendly interface and a low battery consumption mode.

-

BTC mining - Bitcoin Miner

-

BTC mining - Bitcoin Miner is a mining app that focuses on mining bitcoin only on your Android phone. It uses the SHA-256 algorithm to mine bitcoin blocks and earn rewards. You can join a mining pool or mine solo and get paid in bitcoins. BTC mining - Bitcoin Miner also has a simple and intuitive design and a high-performance mode.

-

Tips and Tricks for Mining Bitcoin on Android

-

Mining bitcoin on your Android phone can be fun and rewarding, but it also comes with some challenges and limitations. Here are some tips and tricks to help you optimize your mining experience and maximize your earnings:

-
    -
  • Choose the right app for your device and your preferences. Some apps may not work well on certain devices or may not support the coins you want to mine. Read the app description, reviews, and ratings before downloading and installing any app.
  • -
  • Choose the right mining pool for your coin and your location. Some pools may have higher fees, lower payouts, or longer waiting times than others. Do some research on the pool's reputation, performance, and policies before joining.
  • -
  • Keep your device cool and ventilated. Mining can generate a lot of heat and consume a lot of battery power on your device. To prevent overheating and damage, you should use a cooling pad or fan, avoid direct sunlight or hot surfaces, and plug in your charger when possible.
  • -
  • Monitor your device's performance and health. Mining can also affect your device's speed, memory, storage, and security. To avoid lagging, crashing, or malware attacks, you should check your device's status regularly, clear cache and junk files, update your software and antivirus, and backup your data.
  • -
  • Be realistic and patient. Mining bitcoin on your Android phone is not going to make you rich overnight or without any effort. You should expect modest returns at best and be prepared for fluctuations in the market price and difficulty of bitcoin.
  • -
-

Conclusion

-

Mining bitcoin for free on your Android phone is possible with some of the best apps for mining bitcoin on Android. These apps allow you to use your device's CPU power to solve mining algorithms and earn bitcoins. However, you should also be aware of the risks and challenges involved in mining bitcoin on your Android phone, such as high power consumption, low profitability, device damage, security threats, etc. Therefore, you should follow some tips and tricks to optimize your mining experience and maximize your earnings.

-

FAQs

-

Here are some of the frequently asked questions about mining bitcoin for free on Android:

-

Q: How much can I earn by mining bitcoin on Android?

-

A: The amount of bitcoins you can earn by mining on Android depends on several factors, such as the app you use, the coin you mine, the pool you join, the difficulty of the algorithm, the price of the coin, etc. Generally speaking, you can expect to earn a few cents or dollars per day at most by mining bitcoin on Android.
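For a rough sense of scale, you can estimate expected solo-mining revenue from your share of the total network hash rate. The sketch below uses assumed, illustrative numbers (the phone hash rate, network hash rate, and price are placeholders, not measurements):

```python
phone_hashrate = 20e6        # assumed: ~20 MH/s of SHA-256 from a phone CPU
network_hashrate = 400e18    # assumed: ~400 EH/s across the whole network
block_reward_btc = 6.25      # block subsidy per block
blocks_per_day = 144         # one block roughly every 10 minutes
btc_price_usd = 30_000       # assumed spot price

share = phone_hashrate / network_hashrate          # your slice of the network
btc_per_day = share * block_reward_btc * blocks_per_day
print(f"{btc_per_day:.2e} BTC/day ≈ ${btc_per_day * btc_price_usd:.8f}/day")
# => about 4.5e-11 BTC/day: effectively zero for genuine solo mining
```

Numbers like these are why phone mining apps typically rely on pools, cloud contracts, ads, or referral bonuses rather than genuine solo mining.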

-

Q: Is mining bitcoin on Android legal?

A: In most countries, mining bitcoin is legal, but a few countries restrict or ban cryptocurrency mining altogether, and the rules can change. Check the laws and regulations of your own country before you start mining, and only use apps from trusted sources.

-

Q: Is mining bitcoin on Android safe?

-

A: Mining bitcoin on Android is safe as long as you use reputable and trusted apps that do not contain malware, viruses, or hidden fees. You should also protect your device from overheating, damage, or theft by using a cooling pad or fan, avoiding direct sunlight or hot surfaces, and locking your screen when not in use. You should also secure your bitcoin wallet app with a strong password, encryption, and backup.

-

Q: Is mining bitcoin on Android worth it?

-

A: Mining bitcoin on Android is worth it if you want to earn some free bitcoins without having to buy them with fiat money or exchange them with other cryptocurrencies. It is also worth it if you want to learn more about how bitcoin works and gain technical skills and knowledge. However, you should not expect to make a lot of money or quit your day job by mining bitcoin on Android. You should also consider the costs and risks involved in mining bitcoin on Android, such as high power consumption, low profitability, device damage, security threats, etc.

-

Q: How can I increase my mining speed and earnings on Android?

-

A: There are some ways to increase your mining speed and earnings on Android, such as:

-
    -
  • Using a newer and faster device that has more battery life, storage space, and processing power, and a faster internet connection.
  • -
  • Using a dedicated mining app that has a high-performance mode and supports multiple mining algorithms and coins.
  • -
  • Joining a reputable and profitable mining pool that has low fees, high payouts, and frequent rewards.
  • -
  • Inviting your friends to join your mining app or pool and getting a percentage of their earnings.
  • -
  • Switching to a different coin or algorithm that has a lower difficulty and a higher price.
  • -
-

Q: Can I mine other cryptocurrencies besides bitcoin on Android?

-

A: Yes, you can mine other cryptocurrencies besides bitcoin on Android. Some of the most popular and profitable cryptocurrencies that you can mine on Android are Ethereum, Litecoin, Monero, Dogecoin, Dash, Zcash, etc. However, you should be aware that different cryptocurrencies have different mining algorithms, requirements, rewards, and challenges. You should do some research on the cryptocurrency you want to mine before choosing an app or a pool.

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download TikTok mp3 online and enjoy your favorite music offline.md b/spaces/fatiXbelha/sd/Download TikTok mp3 online and enjoy your favorite music offline.md deleted file mode 100644 index 2bd40eb31061735c45aedba926f61c16dde0ed34..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download TikTok mp3 online and enjoy your favorite music offline.md +++ /dev/null @@ -1,115 +0,0 @@ -
-

How to Download TikTok MP3 Online for Free

-

TikTok is one of the most popular social media apps in the world, with over 1 billion users and millions of videos uploaded every day. The app allows users to create and share short videos with music and sound effects, ranging from comedy, dance, lip-sync, challenges, trends, and more.

-

But what if you want to download the MP3 of a TikTok video and listen to it offline, or use it for your own creative purposes? In this article, we will show you how to download TikTok MP3 online for free using various tools and websites.

-




-

What is TikTok and why download its MP3?

-

TikTok is a popular social media app that allows users to create and share short videos with music and sound effects.

-

TikTok was launched in 2016 as a Chinese app called Douyin, and later merged with another app called Musical.ly in 2018. The app lets users record videos of up to 60 seconds, add filters, stickers, text, music, and sound effects, and share them with their followers or the public.

-

TikTok has a huge library of songs and sounds that users can choose from, or upload their own. Users can also discover new content by browsing through different categories, hashtags, challenges, trends, or following their favorite creators.

-

Downloading TikTok MP3 can be useful for various purposes, such as listening offline, creating your own videos, or remixing the songs.

-

While TikTok is mainly a video-sharing app, some users may want to download the MP3 of a TikTok video for different reasons. For example:

-
    -
  • You may want to listen to a song or a sound effect offline, without using your data or Wi-Fi.
  • -
  • You may want to create your own video using the same or a different song or sound effect from TikTok.
  • -
  • You may want to remix or edit a song or a sound effect from TikTok using audio editing software.
  • -
-

Whatever your reason is, downloading TikTok MP3 online is possible and easy with the help of some tools and websites that we will introduce in the next section.

-

How to download TikTok MP3 with sssTikTok.io?

-

sssTikTok.io is a free online tool that can help you download TikTok MP3 in a few simple steps.

-

sssTikTok.io is one of the best and easiest ways to download TikTok MP3 online for free. It is a web-based tool that works on any device and browser, and does not require any registration or installation. Here are the steps to use it:

-

Step 1: Find the TikTok video that you want to convert to MP3

-

The first thing you need to do is to find the TikTok video that has the song or sound effect that you want to download as MP3. You can do this by using the TikTok app on your phone or tablet, or by visiting the TikTok website on your computer.

-


-

Once you find the video, tap or click on the share icon (the arrow pointing to the right) and select "Copy link". This will copy the URL of the video to your clipboard.

-

Step 2: Copy the link of the video and paste it on the sssTikTok.io website

-

The next thing you need to do is to visit the sssTikTok.io website on your device and browser. You will see a box where you can paste the link of the TikTok video that you copied in the previous step.

-

Paste the link in the box and click on the "Download" button. The website will then process your request and show you a list of options to download the video or audio in different formats and qualities.

-

Step 3: Click on the "Download MP3" button and save the file to your device

-

The last thing you need to do is to click on the "Download MP3" button that corresponds to the quality and size that you prefer. This will start downloading the MP3 file of the TikTok video to your device.

-

You can then save the file to your device's storage or cloud service, or transfer it to another device. You can also play it with any media player that supports MP3 format.
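If you would rather do the conversion locally instead of through a website, the open-source yt-dlp tool can extract the audio track from a TikTok link on your computer. Here is a minimal sketch, assuming yt-dlp and FFmpeg are installed and using a placeholder URL:

```python
import yt_dlp  # pip install yt-dlp (FFmpeg is required for the MP3 conversion)

video_url = "https://www.tiktok.com/@user/video/1234567890"  # placeholder link

options = {
    "format": "bestaudio/best",        # pick the best available audio stream
    "outtmpl": "%(title)s.%(ext)s",    # name the file after the video title
    "postprocessors": [{
        "key": "FFmpegExtractAudio",   # convert the download to MP3
        "preferredcodec": "mp3",
        "preferredquality": "192",     # bitrate in kbps
    }],
}

with yt_dlp.YoutubeDL(options) as ydl:
    ydl.download([video_url])
```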

-

How to download TikTok MP3 with other online converters?

-

There are also other websites that can help you download TikTok MP3, such as Flixier, Descargar Tik Tok MP3, and Baixar musica do TikTok mp3.

-

If, for some reason, you cannot use sssTikTok.io, or you want to try other alternatives, there are also other online converters that can help you download TikTok MP3. Here are some of them:

-

Flixier: A fast and easy-to-use online converter that can also edit and trim your MP3 files.

-

Flixier is another web-based tool that can help you download TikTok MP3 online for free. It is also very fast and easy to use, and has some additional features that can help you edit and trim your MP3 files.

-

To use Flixier, you just need to follow these steps:

-
    -
  1. Visit the Flixier website and click on "Start now".
  2. Choose "TikTok" from the list of sources and paste the link of the TikTok video that you want to convert.
  3. Select "MP3" from the list of formats and choose the quality and size that you want.
  4. Click on "Download" and wait for a few seconds for your file to be ready.
  5. You can then download your MP3 file or edit it with Flixier's online editor.

Descargar Tik Tok MP3: A Spanish website that can convert TikTok videos to MP3 in different languages.

-

If you prefer to use a website in Spanish, or you want to use one in another language, you can try Descargar Tik Tok MP3. It is a simple and reliable website for converting TikTok videos to MP3, with an interface available in Spanish, English, Portuguese, French, German, Italian, and more.

-

To use Descargar Tik Tok MP3, you just need to follow these steps:

-
    -
  1. Visit the Descargar Tik Tok MP3 website and choose the language that you want.
  2. Paste the link of the TikTok video that you want to convert in the box and click on "Convertir".
  3. Wait for a few seconds for the website to process your request and show you the download options.
  4. Click on the "Descargar" button that corresponds to the quality and size that you want.
  5. Save the MP3 file to your device or share it with others.
-

Baixar musica do TikTok mp3: A Portuguese website that can download TikTok music in high quality.

-

If you are looking for a website in Portuguese, or you want to download TikTok music in high quality, you can try Baixar musica do TikTok mp3. It is a fast and easy website that can help you download TikTok music in 320 kbps, which is the highest quality available.

-

To use Baixar musica do TikTok mp3, you just need to follow these steps:

-
    -
  1. Visit the Baixar musica do TikTok mp3 website and paste the link of the TikTok video that you want to convert in the box.
  2. Click on the "Baixar" button and wait for a few seconds for the website to generate your MP3 file.
  3. Click on the "Download" button and save the MP3 file to your device or share it with others.
-

Conclusion

-

In conclusion, downloading TikTok MP3 online is a simple and convenient way to enjoy your favorite songs and sounds from the app. You can use various tools and websites to do it, such as sssTikTok.io, Flixier, Descargar Tik Tok MP3, and Baixar musica do TikTok mp3. All of them are free, easy, and fast to use, and can help you download TikTok MP3 in different formats, qualities, and languages. Try them out and see which one suits your needs best!

-

FAQs

-
    -
  • Is it legal to download TikTok MP3 online?
  • -

    It depends on the source and the purpose of your download. Generally, it is legal to download TikTok MP3 online for personal use only, as long as you do not infringe on the rights of the original creators or owners of the music or sound effects. However, if you intend to use the downloaded MP3 for commercial or public purposes, such as making money or distributing it to others without permission, you may be violating the law and facing legal consequences. Therefore, it is advisable to check the terms and conditions of each tool or website before using them, and respect the intellectual property rights of others.

    -
  • Can I download TikTok MP3 online without watermark?
  • -

    Yes, you can. Most of the tools and websites that we mentioned in this article can help you download TikTok MP3 online without watermark. This means that they will not add any logo or text to your downloaded MP3 file that indicates their source or brand. However, some of them may have ads or pop-ups that may interrupt your experience. You can try using an ad blocker or a premium account to avoid them.

    -
  • Can I download TikTok MP3 online on my iPhone or iPad?
  • -

    Yes, you can. However, due to the restrictions of iOS devices, you may not be able to save the downloaded MP3 file directly to your device's storage or music app. Instead, you may need to use a third-party app or cloud service to store or play your downloaded MP3 file. For example, you can use Documents by Readdle, Dropbox, Google Drive, or VLC Player.

    -
  • Can I download TikTok MP3 online with lyrics?
  • -

    Yes, you can. Some of the tools and websites that we mentioned in this article can help you download TikTok MP3 online with lyrics. This means that they will also provide you with the text of the song or sound effect that accompanies your downloaded MP3 file. For example, Flix ier can help you download TikTok MP3 online with lyrics by showing you the lyrics on the screen while you download or edit your MP3 file. You can also search for the lyrics online using websites like Genius, AZLyrics, or Lyrics.com.

    -
  • Can I download TikTok MP3 online in bulk?
  • -

    Yes, you can. Some of the tools and websites that we mentioned in this article can help you download TikTok MP3 online in bulk. This means that they will allow you to download multiple TikTok videos as MP3 files at once, instead of doing it one by one. For example, sssTikTok.io can help you download TikTok MP3 online in bulk by letting you paste up to 20 links of TikTok videos at a time. You can also use a browser extension or a desktop software to download TikTok MP3 online in bulk.
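If you use the yt-dlp sketch shown earlier, bulk downloading is simply a matter of passing a list of several links to the same call, for example ydl.download([url1, url2, url3]).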

    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Experience the Thrill of Bed Wars on Your Phone.md b/spaces/fatiXbelha/sd/Experience the Thrill of Bed Wars on Your Phone.md deleted file mode 100644 index bacf2b3fd05cf3bcb06d5437861409035bc16fa2..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Experience the Thrill of Bed Wars on Your Phone.md +++ /dev/null @@ -1,174 +0,0 @@ - -

    Bed Wars APK Indir: How to Download and Play the Popular PVP Game

    -

    If you are looking for a fun and exciting team-based PVP game that will challenge your skills and creativity, you might want to try Bed Wars. Bed Wars is a game that has attracted many players around the world, especially in Garena Blockman GO. In this game, you will be battling your opponents on islands in the sky, protect your bed and try to destroy theirs, and use all the tools you have to win the game. In this article, we will tell you everything you need to know about Bed Wars, how to download it as an APK file, and how to play it like a pro.

    -

    What is Bed Wars?

    -

Bed Wars is a team-based game mode that originated on Minecraft multiplayer servers and was popularized by large networks such as Hypixel; it is not an official Mojang game mode. It is a game where players are divided into teams and assigned an island with a bed. The bed represents the team's life, and if it is destroyed, the team members can no longer respawn. The goal of the game is to protect your own bed and destroy the beds of other teams, while collecting resources from generators and using them to buy items and upgrades. The last team standing wins the game.

    -




    -

    The gameplay of Bed Wars

    -

    The gameplay of Bed Wars is simple but addictive. You can play with up to 16 players in a match, with 4 teams of 4 players each. Each team has an island with a bed, a shop, and a resource generator. The resource generator produces iron and gold, which can be used to buy items from the shop. Items include blocks, weapons, armor, tools, potions, and more. You can also collect diamonds and emeralds from generators on other islands, which can be used to buy more powerful items and upgrades.

    -

    To move from one island to another, you need to build bridges using blocks. You can also use ender pearls, launch pads, or teleporters to travel faster. You need to be careful not to fall into the void or get knocked off by enemies, as you will lose all your items and respawn on your island. You can also use TNT, fireballs, or explosives to destroy enemy bridges or beds.

    -

    The game ends when only one team has their bed intact or when the time runs out. The team with the most beds or players left wins the game.

    -

    The features of Bed Wars

    -

    Bed Wars has many features that make it an enjoyable and diverse game. Some of these features are:

    -
      -
    • Multiple maps with different themes and layouts
    • -
    • Various modes such as solo, duo, squad, rush, ultimate, and more
    • -
    • Customizable settings such as team size, item prices, resource rates, bed protection time, etc.
    • -
    • Achievements and rewards such as coins, gems, skins, titles, etc.
    • -
    • Leaderboards and rankings based on wins, kills, beds broken, etc.
    • -
    • Chat and voice communication with teammates and friends
    • -
    • Spectator mode and replay system
    • -
    -

    The benefits of playing Bed Wars

    -

    Playing Bed Wars can have many benefits for you as a player. Some of these benefits are:

    -
      -
    • It improves your teamwork and communication skills
    • -
    • It enhances your strategic thinking and problem-solving skills
    • -
  • It boosts your creativity and imagination

    • -
    • It increases your reflexes and hand-eye coordination
    • -
    • It provides you with fun and entertainment
    • -
    -

    How to download Bed Wars APK indir?

    -

    If you want to play Bed Wars on your Android device, you might be wondering how to download it as an APK file. APK stands for Android Package Kit, and it is a file format that allows you to install applications that are not available on the Google Play Store. By downloading Bed Wars APK indir, you can enjoy the game without any restrictions or limitations.

    -

    -

    The requirements for downloading Bed Wars APK indir

    -

    Before you download Bed Wars APK indir, you need to make sure that your device meets the following requirements:

    -
      -
    • It has Android 4.1 or higher
    • -
    • It has at least 100 MB of free storage space
    • -
    • It has a stable internet connection
    • -
    • It allows installation of unknown sources
    • -
    -

    To enable installation of unknown sources, you need to go to your device settings, security, and toggle on the option that says "allow installation of apps from unknown sources". This will allow you to install Bed Wars APK indir without any problems.

    -

    The steps for downloading Bed Wars APK indir

    -

    Once you have checked the requirements, you can follow these steps to download Bed Wars APK indir:

    -
      -
  1. Go to a trusted website that offers Bed Wars APK indir, such as [APKPure] or [APKCombo].
  2. Search for Bed Wars or Garena Blockman GO in the search bar.
  3. Select the latest version of the game and click on the download button.
  4. Wait for the download to finish and locate the file in your device's downloads folder.
  5. Tap on the file and follow the instructions to install it.
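Before you tap install, it is also worth checking that the file you saved is exactly the one the site published. Here is a minimal Python sketch, assuming the download site publishes a SHA-256 checksum; the file name and hash below are placeholders:

```python
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):  # hash the file in chunks
            h.update(chunk)
    return h.hexdigest()

expected = "<checksum published by the download site>"  # placeholder value
actual = sha256_of("bed-wars.apk")                      # placeholder file name
print("OK to install" if actual == expected else "Hash mismatch - do not install!")
```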
    -

    The tips for installing and running Bed Wars APK indir

    -

    After you have installed Bed Wars APK indir, you can launch the game and start playing. However, there are some tips that you should keep in mind to ensure a smooth and enjoyable gaming experience:

    -
      -
    • Make sure that your device has enough battery and memory before playing.
    • -
    • Close any background apps that might slow down your device or interfere with the game.
    • -
    • Update the game regularly to get the latest features and bug fixes.
    • -
    • Avoid using any cheats or hacks that might get you banned or harm your device.
    • -
    • Join a reliable and fast network to avoid lag or disconnects.
    • -
    -

    How to play Bed Wars?

    -

    Now that you have downloaded and installed Bed Wars APK indir, you are ready to play the game. But how do you play it? What are the rules and strategies? What are the different modes and resources? Don't worry, we will answer all these questions and more in this section.

    -

    The modes of Bed Wars

    -

    Bed Wars has several modes that you can choose from, depending on your preference and skill level. Some of these modes are:

    -
      -
    • Solo: You play alone against 3 other players. You have one bed, one shop, and one resource generator. You need to survive and eliminate all your enemies.
    • -
    • Duo: You play with a partner against 3 other teams of 2 players each. You share one bed, one shop, and one resource generator with your partner. You need to cooperate and communicate with your partner to win the game.
    • -
    • Squad: You play with 3 other teammates against 3 other teams of 4 players each. You share one bed, one shop, and one resource generator with your team. You need to work together and coordinate with your team to win the game.
    • -
    • Rush: You play in a fast-paced mode where the beds are already exposed and there are no diamonds or emeralds. You need to rush to your enemies' islands and destroy their beds as soon as possible.
    • -
    • Ultimate: You play in a mode where you can use special abilities such as speed boost, jump boost, invisibility, etc. You need to use these abilities wisely and strategically to gain an advantage over your enemies.
    • -
    -

    The strategies of Bed Wars

    -

Bed Wars is a game that requires both skill and strategy. You need to know how to use your items, how to build bridges, how to defend your bed, how to attack your enemies, how to manage your resources, etc. Here are some general tips that can help you improve your gameplay and strategy in Bed Wars:

    -
      -
    • Always protect your bed with blocks, wool, wood, end stone, obsidian, etc. You can also use traps, alarms, or fire charges to alert you of incoming enemies.
    • -
    • Always collect resources from your generator and from other islands. You can use them to buy better items and upgrades from the shop.
    • -
    • Always upgrade your resource generator, your team upgrades, and your personal upgrades. They can give you more resources, more health, more damage, more protection, etc.
    • -
    • Always build bridges to other islands, especially the ones with diamonds and emeralds. You can use them to buy the best items and upgrades in the game.
    • -
    • Always be aware of your surroundings and your enemies. You can use compasses, trackers, or maps to locate them. You can also use spy glasses, invisibility potions, or camouflage skins to sneak up on them.
    • -
    • Always attack your enemies when they are vulnerable or distracted. You can use TNT, fireballs, or explosives to destroy their beds or bridges. You can also use swords, bows, or snowballs to kill them or knock them off.
    • -
  • Always have a backup plan in case your bed is destroyed or your team is eliminated. You can use ender pearls, launch pads, or teleporters to escape or surprise your enemies. You can also use golden apples (gapples) or potions to heal or buff yourself.
    • -
    -

    The resources of Bed Wars

    -

    Bed Wars has four types of resources that you can collect and use in the game. They are:

| Resource | Color | Use |
| --- | --- | --- |
| Iron | Gray | Buy basic items such as blocks, weapons, armor, tools, etc. |
| Gold | Yellow | Buy advanced items such as bows, arrows, TNT, fireballs, etc. |
| Diamonds | Blue | Buy team upgrades such as resource generator level, team protection level, team sharpness level, etc. |
| Emeralds | Green | Buy personal upgrades such as diamond armor, diamond sword, ender pearls, invisibility potions, etc. |
    -

    Conclusion

    -

    In conclusion, Bed Wars is a game that you should definitely try if you are looking for a fun and exciting PVP game that will test your skills and creativity. You can download it as an APK file from trusted websites and play it on your Android device without any hassle. You can also learn how to play it better by following the tips and strategies we have shared in this article. We hope you have enjoyed reading this article and learned something new about Bed Wars. Now go ahead and download Bed Wars APK indir and start playing!

    -

    FAQs

    -

    What is the difference between Bed Wars and Sky Wars?

    -

    Bed Wars and Sky Wars are both PVP games that involve fighting on islands in the sky. However, Bed Wars has beds that represent the team's life and need to be protected and destroyed. Sky Wars does not have beds and the goal is to be the last player or team alive.

    -

    How can I play Bed Wars with my friends?

    -

    You can play Bed Wars with your friends by joining the same server and inviting them to your party. You can also create a private room and invite them to join using a code.

    -

    How can I get free coins and gems in Bed Wars?

    -

    You can get free coins and gems in Bed Wars by completing achievements and daily tasks. You can also watch ads or participate in events to get more rewards.

    -

    How can I change my skin in Bed Wars?

    -

    You can change your skin in Bed Wars by going to the lobby and clicking on the wardrobe icon. You can choose from different skins that you have unlocked or bought with coins or gems.

    -

    How can I report a bug or a hacker in Bed Wars?

    -

    You can report a bug or a hacker in Bed Wars by going to the settings menu and clicking on the feedback button. You can fill out a form with the details of the issue and submit it to the developers.

    -
    -
    \ No newline at end of file diff --git a/spaces/fatmacankara/ASCARIS/code/main.py b/spaces/fatmacankara/ASCARIS/code/main.py deleted file mode 100644 index 4b757ccfbb05c197fa7c931b060ce0dbf23219af..0000000000000000000000000000000000000000 --- a/spaces/fatmacankara/ASCARIS/code/main.py +++ /dev/null @@ -1,35 +0,0 @@ -import pdb_featureVector -import alphafold_featureVector -import argparse - -parser = argparse.ArgumentParser(description='ASCARIS') - -parser.add_argument('-s', '--source_option', - help='Selection of input structure data.\n 1: PDB Structures (default), 2: AlphaFold Structures', - default=1) -parser.add_argument('-i', '--input_datapoint', - help='Input file or query datapoint\n Option 1: Comma-separated list of idenfiers (UniProt ID-wt residue-position-mutated residue (e.g. Q9Y4W6-N-432-T or Q9Y4W6-N-432-T, Q9Y4W6-N-432-T)) \n Option 2: Enter comma-separated file path') - -parser.add_argument('-impute', '--imputation_state', default='True', - help='Whether resulting feature vector should be imputed or not. Default True.') - -args = parser.parse_args() - -input_set = args.input_datapoint -mode = args.source_option -impute = args.imputation_state - -def run_featureVector(input_set, mode, impute): - print('*****************************************') - print('Feature vector generation is in progress. \nPlease check log file for updates..') - print('*****************************************') - mode = int(mode) - if mode == 1: - pdb_featureVector.pdb(input_set, mode, impute) - elif mode == 2: - alphafold_featureVector.alphafold(input_set, mode, impute) - -if __name__ == '__main__': - run_featureVector(input_set, mode, impute) - - diff --git "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" "b/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" deleted file mode 100644 index 19381e5c27fb2aa4728a1b223fb5f86859e49623..0000000000000000000000000000000000000000 --- "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" +++ /dev/null @@ -1,247 +0,0 @@ -from toolbox import update_ui, trimmed_format_exc, gen_time_str -from toolbox import CatchException, report_execption, write_results_to_file -fast_debug = False - -class PaperFileGroup(): - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - 将长文本分离开来 - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md") - print('Segmentation: done') - - def merge_result(self): - self.file_result = ["" for _ in 
range(len(self.file_paths))] - for r, k in zip(self.sp_file_result, self.sp_file_index): - self.file_result[k] += r - - def write_result(self, language): - manifest = [] - for path, res in zip(self.file_paths, self.file_result): - with open(path + f'.{gen_time_str()}.{language}.md', 'w', encoding='utf8') as f: - manifest.append(path + f'.{gen_time_str()}.{language}.md') - f.write(res) - return manifest - -def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'): - import time, os, re - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - - # <-------- 读取Markdown文件,删除其中的所有注释 ----------> - pfg = PaperFileGroup() - - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - # 记录删除注释后的文本 - pfg.file_paths.append(fp) - pfg.file_contents.append(file_content) - - # <-------- 拆分过长的Markdown文件 ----------> - pfg.run_file_split(max_token_limit=1500) - n_split = len(pfg.sp_file_contents) - - # <-------- 多线程翻译开始 ----------> - if language == 'en->zh': - inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - elif language == 'zh->en': - inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - else: - inputs_array = [f"This is a Markdown file, translate it into {language}, do not modify any existing Markdown commands, only answer me with translated results:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." 
for _ in range(n_split)] - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # OpenAI所允许的最大并行过载 - scroller_max_len = 80 - ) - try: - pfg.sp_file_result = [] - for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]): - pfg.sp_file_result.append(gpt_say) - pfg.merge_result() - pfg.write_result(language) - except: - print(trimmed_format_exc()) - - # <-------- 整理结果,退出 ----------> - create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md" - res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name) - history = gpt_response_collection - chatbot.append((f"{fp}完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - -def get_files_from_everything(txt): - import glob, os - - success = True - if txt.startswith('http'): - # 网络的远程文件 - txt = txt.replace("https://github.com/", "https://raw.githubusercontent.com/") - txt = txt.replace("/blob/", "/") - import requests - from toolbox import get_conf - proxies, = get_conf('proxies') - r = requests.get(txt, proxies=proxies) - with open('./gpt_log/temp.md', 'wb+') as f: f.write(r.content) - project_folder = './gpt_log/' - file_manifest = ['./gpt_log/temp.md'] - elif txt.endswith('.md'): - # 直接给定文件 - file_manifest = [txt] - project_folder = os.path.dirname(txt) - elif os.path.exists(txt): - # 本地路径,递归搜索 - project_folder = txt - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)] - else: - success = False - - return success, file_manifest, project_folder - - -@CatchException -def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - import glob, os - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - - success, file_manifest, project_folder = get_files_from_everything(txt) - - if not success: - # 什么都没有 - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh') - - - - - -@CatchException -def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - import glob, os - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield 
from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - success, file_manifest, project_folder = get_files_from_everything(txt) - if not success: - # 什么都没有 - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') - - -@CatchException -def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - import glob, os - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - success, file_manifest, project_folder = get_files_from_everything(txt) - if not success: - # 什么都没有 - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - language = plugin_kwargs.get("advanced_arg", 'Chinese') - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language=language) \ No newline at end of file diff --git a/spaces/feng2022/styleganhuman_copy/torch_utils/models_face.py b/spaces/feng2022/styleganhuman_copy/torch_utils/models_face.py deleted file mode 100644 index ce3f5d2f3c41206c18a9dba973c8e5999ddf47fd..0000000000000000000000000000000000000000 --- a/spaces/feng2022/styleganhuman_copy/torch_utils/models_face.py +++ /dev/null @@ -1,809 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. 
- -import math -import random -import functools -import operator - -import torch -from torch import nn -from torch.nn import functional as F -import torch.nn.init as init -from torch.autograd import Function - -from .op_edit import FusedLeakyReLU, fused_leaky_relu, upfirdn2d - - -class PixelNorm(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, input): - return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) - - -def make_kernel(k): - k = torch.tensor(k, dtype=torch.float32) - - if k.ndim == 1: - k = k[None, :] * k[:, None] - - k /= k.sum() - - return k - - -class Upsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) * (factor ** 2) - self.register_buffer("kernel", kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad) - - return out - - -class Downsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) - self.register_buffer("kernel", kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) - - return out - - -class Blur(nn.Module): - def __init__(self, kernel, pad, upsample_factor=1): - super().__init__() - - kernel = make_kernel(kernel) - - if upsample_factor > 1: - kernel = kernel * (upsample_factor ** 2) - - self.register_buffer("kernel", kernel) - - self.pad = pad - - def forward(self, input): - out = upfirdn2d(input, self.kernel, pad=self.pad) - - return out - - -class EqualConv2d(nn.Module): - def __init__( - self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True - ): - super().__init__() - - self.weight = nn.Parameter( - torch.randn(out_channel, in_channel, kernel_size, kernel_size) - ) - self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) - - self.stride = stride - self.padding = padding - - if bias: - self.bias = nn.Parameter(torch.zeros(out_channel)) - - else: - self.bias = None - - def forward(self, input): - out = F.conv2d( - input, - self.weight * self.scale, - bias=self.bias, - stride=self.stride, - padding=self.padding, - ) - - return out - - def __repr__(self): - return ( - f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}," - f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})" - ) - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None - ): - super().__init__() - - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - - if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - - else: - self.bias = None - - self.activation = activation - - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def forward(self, input): - if self.activation: - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul) - - else: - out = F.linear( - input, self.weight * self.scale, bias=self.bias * self.lr_mul - ) - - return out - - def __repr__(self): - return ( - f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})" - ) - - -class ScaledLeakyReLU(nn.Module): - def __init__(self, 
negative_slope=0.2): - super().__init__() - - self.negative_slope = negative_slope - - def forward(self, input): - out = F.leaky_relu(input, negative_slope=self.negative_slope) - - return out * math.sqrt(2) - - -class ModulatedConv2d(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - demodulate=True, - upsample=False, - downsample=False, - blur_kernel=[1, 3, 3, 1], - ): - super().__init__() - - self.eps = 1e-8 - self.kernel_size = kernel_size - self.in_channel = in_channel - self.out_channel = out_channel - self.upsample = upsample - self.downsample = downsample - - if upsample: - factor = 2 - p = (len(blur_kernel) - factor) - (kernel_size - 1) - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 + 1 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1)) - - fan_in = in_channel * kernel_size ** 2 - self.scale = 1 / math.sqrt(fan_in) - self.padding = kernel_size // 2 - - self.weight = nn.Parameter( - torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) - ) - - self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) - - self.demodulate = demodulate - - def __repr__(self): - return ( - f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, " - f"upsample={self.upsample}, downsample={self.downsample})" - ) - - def forward(self, input, style): - batch, in_channel, height, width = input.shape - - style = self.modulation(style).view(batch, 1, in_channel, 1, 1) - weight = self.scale * self.weight * style - - if self.demodulate: - demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) - weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) - - weight = weight.view( - batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - - if self.upsample: - input = input.view(1, batch * in_channel, height, width) - weight = weight.view( - batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - weight = weight.transpose(1, 2).reshape( - batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size - ) - out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - out = self.blur(out) - - elif self.downsample: - input = self.blur(input) - _, _, height, width = input.shape - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - else: - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=self.padding, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - return out - - -class NoiseInjection(nn.Module): - def __init__(self): - super().__init__() - - self.weight = nn.Parameter(torch.zeros(1)) - - def forward(self, image, noise=None): - if noise is None: - batch, _, height, width = image.shape - noise = image.new_empty(batch, 1, height, width).normal_() - - return image + self.weight * noise - - -class ConstantInput(nn.Module): - def __init__(self, channel, size=4): - super().__init__() - - self.input = nn.Parameter(torch.randn(1, channel, size, size)) - - def forward(self, input): - batch = 
input.shape[0] - out = self.input.repeat(batch, 1, 1, 1) - - return out - - -class StyledConv(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=False, - blur_kernel=[1, 3, 3, 1], - demodulate=True, - ): - super().__init__() - - self.conv = ModulatedConv2d( - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=upsample, - blur_kernel=blur_kernel, - demodulate=demodulate, - ) - - self.noise = NoiseInjection() - # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) - # self.activate = ScaledLeakyReLU(0.2) - self.activate = FusedLeakyReLU(out_channel) - - def forward(self, input, style, noise=None): - out = self.conv(input, style) - out = self.noise(out, noise=noise) - # out = out + self.bias - out = self.activate(out) - - return out - - -class ToRGB(nn.Module): - def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - if upsample: - self.upsample = Upsample(blur_kernel) - - self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False) - self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) - - def forward(self, input, style, skip=None): - out = self.conv(input, style) - out = out + self.bias - - if skip is not None: - skip = self.upsample(skip) - - out = out + skip - - return out - - -class Generator(nn.Module): - def __init__( - self, - size, - style_dim, - n_mlp, - channel_multiplier=1, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - small=False, - small_isaac=False, - ): - super().__init__() - - self.size = size - - if small and size > 64: - raise ValueError("small only works for sizes <= 64") - - self.style_dim = style_dim - - layers = [PixelNorm()] - - for i in range(n_mlp): - layers.append( - EqualLinear( - style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu" - ) - ) - - self.style = nn.Sequential(*layers) - - if small: - self.channels = { - 4: 64 * channel_multiplier, - 8: 64 * channel_multiplier, - 16: 64 * channel_multiplier, - 32: 64 * channel_multiplier, - 64: 64 * channel_multiplier, - } - elif small_isaac: - self.channels = {4: 256, 8: 256, 16: 256, 32: 256, 64: 128, 128: 128} - else: - self.channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - self.input = ConstantInput(self.channels[4]) - self.conv1 = StyledConv( - self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel - ) - self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) - - self.log_size = int(math.log(size, 2)) - self.num_layers = (self.log_size - 2) * 2 + 1 - - self.convs = nn.ModuleList() - self.upsamples = nn.ModuleList() - self.to_rgbs = nn.ModuleList() - self.noises = nn.Module() - - in_channel = self.channels[4] - - for layer_idx in range(self.num_layers): - res = (layer_idx + 5) // 2 - shape = [1, 1, 2 ** res, 2 ** res] - self.noises.register_buffer( - "noise_{}".format(layer_idx), torch.randn(*shape) - ) - - for i in range(3, self.log_size + 1): - out_channel = self.channels[2 ** i] - - self.convs.append( - StyledConv( - in_channel, - out_channel, - 3, - style_dim, - upsample=True, - blur_kernel=blur_kernel, - ) - ) - - self.convs.append( - StyledConv( - out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel - ) - ) - - self.to_rgbs.append(ToRGB(out_channel, style_dim)) - - in_channel = out_channel - - self.n_latent = self.log_size * 2 - 2 - - def 
make_noise(self): - device = self.input.input.device - - noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)] - - for i in range(3, self.log_size + 1): - for _ in range(2): - noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device)) - - return noises - - def mean_latent(self, n_latent): - latent_in = torch.randn( - n_latent, self.style_dim, device=self.input.input.device - ) - latent = self.style(latent_in).mean(0, keepdim=True) - - return latent - - def get_latent(self, input): - return self.style(input) - - def forward( - self, - styles, - return_latents=False, - return_features=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - randomize_noise=True, - ): - if not input_is_latent: - # print("haha") - styles = [self.style(s) for s in styles] - if noise is None: - if randomize_noise: - noise = [None] * self.num_layers - else: - noise = [ - getattr(self.noises, "noise_{}".format(i)) - for i in range(self.num_layers) - ] - - if truncation < 1: - style_t = [] - - for style in styles: - style_t.append( - truncation_latent + truncation * (style - truncation_latent) - ) - - styles = style_t - # print(styles) - if len(styles) < 2: - inject_index = self.n_latent - - if styles[0].ndim < 3: - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - # print("a") - else: - # print(len(styles)) - latent = styles[0] - # print("b", latent.shape) - - else: - # print("c") - if inject_index is None: - inject_index = 4 - - latent = styles[0].unsqueeze(0) - if latent.shape[1] == 1: - latent = latent.repeat(1, inject_index, 1) - else: - latent = latent[:, :inject_index, :] - latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1) - - latent = torch.cat([latent, latent2], 1) - - features = {} - out = self.input(latent) - features["out_0"] = out - out = self.conv1(out, latent[:, 0], noise=noise[0]) - features["conv1_0"] = out - - skip = self.to_rgb1(out, latent[:, 1]) - features["skip_0"] = skip - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs - ): - out = conv1(out, latent[:, i], noise=noise1) - features["conv1_{}".format(i)] = out - out = conv2(out, latent[:, i + 1], noise=noise2) - features["conv2_{}".format(i)] = out - skip = to_rgb(out, latent[:, i + 2], skip) - features["skip_{}".format(i)] = skip - - i += 2 - - image = skip - - if return_latents: - return image, latent - elif return_features: - return image, features - else: - return image, None - - -class ConvLayer(nn.Sequential): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - downsample=False, - blur_kernel=[1, 3, 3, 1], - bias=True, - activate=True, - ): - layers = [] - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - layers.append(Blur(blur_kernel, pad=(pad0, pad1))) - - stride = 2 - self.padding = 0 - - else: - stride = 1 - self.padding = kernel_size // 2 - - layers.append( - EqualConv2d( - in_channel, - out_channel, - kernel_size, - padding=self.padding, - stride=stride, - bias=bias and not activate, - ) - ) - - if activate: - if bias: - layers.append(FusedLeakyReLU(out_channel)) - - else: - layers.append(ScaledLeakyReLU(0.2)) - - super().__init__(*layers) - - -class ResBlock(nn.Module): - def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - self.conv1 = ConvLayer(in_channel, in_channel, 3) - self.conv2 = ConvLayer(in_channel, 
out_channel, 3, downsample=True) - - self.skip = ConvLayer( - in_channel, out_channel, 1, downsample=True, activate=False, bias=False - ) - - def forward(self, input): - out = self.conv1(input) - out = self.conv2(out) - - skip = self.skip(input) - out = (out + skip) / math.sqrt(2) - - return out - - -class StyleDiscriminator(nn.Module): - def __init__( - self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], small=False - ): - super().__init__() - - if small: - channels = {4: 64, 8: 64, 16: 64, 32: 64, 64: 64} - - else: - channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - convs = [ConvLayer(3, channels[size], 1)] - - log_size = int(math.log(size, 2)) - - in_channel = channels[size] - - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - - convs.append(ResBlock(in_channel, out_channel, blur_kernel)) - - in_channel = out_channel - - self.convs = nn.Sequential(*convs) - - self.stddev_group = 4 - self.stddev_feat = 1 - - self.final_conv = ConvLayer(in_channel + 1, channels[4], 3) - self.final_linear = nn.Sequential( - EqualLinear(channels[4] * 4 * 4, channels[4], activation="fused_lrelu"), - EqualLinear(channels[4], 1), - ) - -# def forward(self, input): -# out = self.convs(input) - -# batch, channel, height, width = out.shape -# group = min(batch, self.stddev_group) -# stddev = out.view( -# group, -1, self.stddev_feat, channel // self.stddev_feat, height, width -# ) -# stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) -# stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) -# stddev = stddev.repeat(group, 1, height, width) -# out = torch.cat([out, stddev], 1) - -# out = self.final_conv(out) - -# out = out.view(batch, -1) -# out = self.final_linear(out) - -# return out - - def forward(self, input): - h = input - h_list = [] - - for index, blocklist in enumerate(self.convs): - h = blocklist(h) - h_list.append(h) - - out = h - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - h_list.append(out) - - out = out.view(batch, -1) - out = self.final_linear(out) - - return out, h_list - - -class StyleEncoder(nn.Module): - def __init__(self, size, w_dim=512): - super().__init__() - - channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256, - 128: 128, - 256: 64, - 512: 32, - 1024: 16 - } - - self.w_dim = w_dim - log_size = int(math.log(size, 2)) - - # self.n_latents = log_size*2 - 2 - - convs = [ConvLayer(3, channels[size], 1)] - - in_channel = channels[size] - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - convs.append(ResBlock(in_channel, out_channel)) - in_channel = out_channel - - # convs.append(EqualConv2d(in_channel, self.n_latents*self.w_dim, 4, padding=0, bias=False)) - convs.append(EqualConv2d(in_channel,2*self.w_dim, 4, padding=0, bias=False)) - - - self.convs = nn.Sequential(*convs) - - def forward(self, input): - out = self.convs(input) - # return out.view(len(input), self.n_latents, self.w_dim) - reshaped = out.view(len(input), 2*self.w_dim) - return reshaped[:,:self.w_dim], 
reshaped[:,self.w_dim:] - -def kaiming_init(m): - if isinstance(m, (nn.Linear, nn.Conv2d)): - init.kaiming_normal_(m.weight) - if m.bias is not None: - m.bias.data.fill_(0) - elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)): - m.weight.data.fill_(1) - if m.bias is not None: - m.bias.data.fill_(0) - - -def normal_init(m): - if isinstance(m, (nn.Linear, nn.Conv2d)): - init.normal_(m.weight, 0, 0.02) - if m.bias is not None: - m.bias.data.fill_(0) - elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)): - m.weight.data.fill_(1) - if m.bias is not None: - m.bias.data.fill_(0) \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Cute Carnival A Pokemon-Inspired Game for Android - Download Now!.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Cute Carnival A Pokemon-Inspired Game for Android - Download Now!.md deleted file mode 100644 index e0c0aeaba74f1b472878b93b49afb69db479bc1c..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Cute Carnival A Pokemon-Inspired Game for Android - Download Now!.md +++ /dev/null @@ -1,120 +0,0 @@ -
    -

    Cute Carnival: A New Pokemon Game for Mobile Devices

    -

If you are a fan of Pokemon and love playing mobile games, you might want to check out Cute Carnival, a new Pokemon game that is available on Google Play for Android devices. Cute Carnival is an online mobile RPG that lets you summon, collect, battle, and evolve your favorite Pokemon in a colorful and fun world. In this article, we will tell you more about what Cute Carnival is, how to download and play it, and what other players and critics think about it.

    -




    -

    What is Cute Carnival?

    -

An online mobile RPG based on Pokemon

    -

    Cute Carnival is a game developed by Riversgame, a studio that specializes in creating mobile games based on popular anime and manga franchises. The game is inspired by the Pokemon series, one of the most successful media franchises of all time, with over 440 million game units sold worldwide. Cute Carnival features many of the familiar Pokemon from the core series games, as well as some original ones created by the developers.

    -

    Features of Cute Carnival

    -

    Funny storyline and colorful graphics

    -

    In Cute Carnival, you play as a trainer who goes on an adventure to explore the world of Pokemon, meet new friends, and face various challenges. The game has a funny and engaging storyline that will keep you entertained as you progress through different stages and regions. The game also has colorful and cute graphics that capture the charm and personality of the Pokemon characters.

    -

    Stage challenge and cross-server arena modes

    -

    The game has two main modes of gameplay: stage challenge and cross-server arena. In stage challenge mode, you team up with other players to battle against powerful enemies and bosses. You can create your own team of Pokemon, or join a random team with other players. You can also customize your team formation and strategy before each battle. In cross-server arena mode, you compete against other players from different servers in real-time PvP battles. You can test your skills and rank up in various leagues and tournaments.

    -

    Evolution and transformation of Pokemon

    -

    One of the most exciting features of Cute Carnival is the evolution and transformation of Pokemon. You can evolve your Pokemon to unlock new abilities and appearances, as well as increase their combat power. You can also transform your Pokemon into different forms, such as Mega Evolution, Gigantamax, Dynamax, Z-Move, etc. These transformations are only possible in certain battles, and they can turn the tide of the fight in your favor.

    -

    How to Download and Play Cute Carnival?

    -

    Available on Google Play for Android devices

    -

    Cute Carnival is currently only available on Google Play for Android devices. The game is free to download and play, but it offers in-game purchases for some items and features. The game requires Android version 6.0 or higher, and at least 2GB of RAM. The game also requires an internet connection to play online.

    -


    -

    Steps to download and install the game

    -

    To download and install Cute Carnival on your Android device, follow these steps:

    -
      -
1. Go to Google Play Store on your device.
2. Search for "Cute Carnival" or use this link to access the game page.
3. Tap on "Install" to start downloading the game.
4. Wait for the download process to finish.
5. Once the download is complete, tap on "Open" to launch the game.
6. Follow the instructions on the screen to create your account and start playing.
    -

    Tips and tricks to enjoy the game

    -

    Here are some tips and tricks to help you enjoy Cute Carnival more:

    -
      -
• Complete the tutorial and daily quests to earn rewards and learn the basics of the game.
• Summon new Pokemon using crystals or tickets, and try to collect as many as you can.
• Upgrade and evolve your Pokemon regularly to increase their power and unlock new skills.
• Use the right type of Pokemon for each battle, and take advantage of their strengths and weaknesses.
• Join a guild and make friends with other players. You can chat, trade, and cooperate with them in various events.
• Participate in the cross-server arena and other PvP modes to challenge yourself and earn glory and prizes.
    -

    Reviews and Ratings of Cute Carnival

    -

    Positive feedback from players and critics

    -

    Cute Carnival has received positive feedback from both players and critics who have tried the game. The game has a 4.5-star rating on Google Play, with over 10,000 reviews. Many players have praised the game for its fun gameplay, cute graphics, and variety of Pokemon. Some of the comments from the players are:

    -
    -

    "This game is so fun and addictive. I love all the Pokemon and their transformations. The battles are exciting and challenging. The graphics are amazing and colorful. I recommend this game to all Pokemon fans."

    -

    "I have been playing this game for a week now and I can't stop. It's like a dream come true for me. I can summon, collect, battle, and evolve my favorite Pokemon. The game is very well-made and has a lot of content. It's definitely worth playing."

    -

    "This is one of the best Pokemon games I have ever played. It has everything I want in a Pokemon game: a funny story, a huge world, a lot of Pokemon, different modes, and online features. The game is very easy to play and enjoy. I love it."

    -
    -

    Comparison with other Pokemon games

    -

    Cute Carnival is not the only Pokemon game available on mobile devices. There are other games that are based on or related to the Pokemon franchise, such as Pokemon Go, Pokemon Masters EX, Pokemon Unite, etc. How does Cute Carnival compare with these games?

    -

    One of the main differences between Cute Carnival and other Pokemon games is that Cute Carnival is not an official game licensed by Nintendo or The Pokemon Company. It is a fan-made game that uses the Pokemon characters and elements without authorization. This means that Cute Carnival may not be as accurate or faithful to the original source material as the official games. It also means that Cute Carnival may face legal issues or be removed from Google Play in the future.

    -

    Another difference between Cute Carnival and other Pokemon games is that Cute Carnival focuses more on the RPG online aspect of the game, rather than the exploration or social aspect. Cute Carnival has a linear storyline that guides the player through different stages and regions, rather than an open-world map that allows the player to roam freely. Cute Carnival also has more emphasis on the team-based combat and PvP modes, rather than the catching or trading of Pokemon.

    -

    Areas for improvement and future updates

    -

    Cute Carnival is not a perfect game, and it has some areas for improvement and future updates. Some of the common complaints or suggestions from the players are:

    -
      -
• The game is too repetitive and grindy at times. The player has to do the same tasks or battles over and over again to progress or earn rewards.
• The game is too pay-to-win or unfair for free-to-play players. The player has to spend real money or a lot of time to get better Pokemon or items.
• The game has some bugs or glitches that affect the gameplay or performance. The game may crash, freeze, lag, or display errors at times.
• The game needs more content or features to keep the player interested. The game may add more Pokemon, regions, modes, events, etc.
    -

    Conclusion

    -

    Summary of the main points

    -

    In conclusion, Cute Carnival is a new Pokemon game for mobile devices that offers a fun and engaging experience for fans of the franchise. The game lets you summon, collect, battle, and evolve your favorite Pokemon in a colorful and funny world. The game has two main modes: stage challenge and cross-server arena. The game also features the evolution and transformation of Pokemon, which can make them more powerful and cool-looking. The game is available on Google Play for Android devices, and it is free to download and play. The game has received positive feedback from players and critics, who have praised its gameplay, graphics, and variety of Pokemon. The game also has some areas for improvement and future updates, such as adding more content, fixing bugs, and balancing the game.

    -

    Recommendation and invitation to try the game

    -

If you are looking for a new Pokemon game to play on your mobile device, we recommend you try Cute Carnival. It is a fun and engaging game that will keep you entertained for hours. You can enjoy the game with your friends or other players online, and experience the thrill of battling and evolving your Pokemon. You can download the game from Google Play using this link, or scan this QR code:

-[Image: QR code for Cute Carnival download link] -

    We hope you enjoy Cute Carnival as much as we do, and we look forward to seeing you in the game.

    -

    FAQs

    -

    Here are some frequently asked questions about Cute Carnival:

    -
      -
1. Q: Is Cute Carnival an official Pokemon game?
   A: No, Cute Carnival is not an official Pokemon game licensed by Nintendo or The Pokemon Company. It is a fan-made game that uses the Pokemon characters and elements without authorization.
2. Q: Is Cute Carnival compatible with iOS devices?
   A: No, Cute Carnival is currently only compatible with Android devices. There is no information about whether the game will be released for iOS devices in the future.
3. Q: How can I get more crystals or tickets to summon new Pokemon?
   A: You can get more crystals or tickets by completing quests, participating in events, ranking up in the arena, or purchasing them with real money.
4. Q: How can I join a guild or make friends in Cute Carnival?
   A: You can join a guild or make friends by tapping on the "Guild" or "Friends" icons on the main screen. You can search for a guild or a friend by name, ID, or server. You can also create your own guild or invite your friends to join you.
5. Q: How can I contact customer service or report a problem in Cute Carnival?
   A: You can contact customer service or report a problem by tapping on the "Settings" icon on the main screen. You can then tap on the "Customer Service" or "Feedback" buttons to send a message or a screenshot to the developers.

    -
    -
    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download App Music Player The Best Free Music Player and Media Player.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download App Music Player The Best Free Music Player and Media Player.md deleted file mode 100644 index 6edbd4d111de05671759584d0b815758cece1ae5..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download App Music Player The Best Free Music Player and Media Player.md +++ /dev/null @@ -1,112 +0,0 @@ - -

How to Download an App Music Player for Android

    -

    If you love listening to music on your phone, you might want to consider downloading an app music player for Android. An app music player is a software application that allows you to play various types of audio files on your device, such as songs, podcasts, audiobooks, etc. Unlike the default music player that comes with your phone, an app music player can offer you more features and benefits that can enhance your listening experience.

    -




    -

    In this article, we will explain what app music players are and why they are useful for listening to music on the go. We will also review some of the best app music players for Android that you can download from the Google Play Store. Finally, we will show you how to download and install app music players for Android and how to use them effectively.

    -

    The Benefits of App Music Players

    -

    App music players can provide you with many advantages over the default music player on your phone. Here are some of the benefits of app music players:

    -

    Access to a variety of music sources

    -

    One of the main benefits of app music players is that they can give you access to a wide range of music sources that you can choose from. For example, you can stream music from popular online services like Spotify, YouTube Music, Amazon Music, Apple Music, etc. You can also play local files that you have stored on your phone or SD card. Some app music players also support podcasts, radio stations, online videos, and other types of audio content.

    -

    -

    Customization options

    -

    Another benefit of app music players is that they can allow you to customize your listening experience according to your preferences. For example, you can create and manage playlists of your favorite songs or genres. You can also adjust the sound quality and effects using an equalizer or other audio enhancements. Some app music players also let you change the theme or appearance of the app interface.

    -

    Offline mode

    -

A third benefit of app music players is that they can enable you to listen to music offline without an internet connection. This can be useful when you are traveling, working, or in a place where there is no Wi-Fi or cellular data. Some app music players allow you to download music from their online sources and save them on your device for offline playback. You can also use offline mode to save data and battery life.

    -

    The Best App Music Players for Android

    -

    There are many app music players for Android that you can choose from, depending on your needs and preferences. Here are some of the best app music players for Android that we recommend:

    -

    Spotify

    -

    Spotify is one of the most popular music streaming services and app music players in the world. It has a huge library of over 70 million songs, podcasts, and playlists that you can access with a free or premium plan. With Spotify, you can discover new music, artists, and genres based on your taste and mood. You can also create your own playlists, follow your favorite artists, and share your music with your friends. Spotify also has an offline mode that lets you download up to 10,000 songs per device.

    -

    YouTube Music

    -

    YouTube Music is the successor of Google Play Music, which was discontinued in 2020. It is a music streaming service and app music player that lets you access a large library of songs and videos from YouTube. You can also import your own music files from your device or Google Drive. YouTube Music has a smart search feature that allows you to find songs by lyrics, mood, genre, or even humming. You can also enjoy personalized recommendations, curated playlists, and live performances. YouTube Music also has an offline mode that lets you download up to 100 songs per device.

    -

    Amazon Music

    -

    Amazon Music is the music streaming service and app music player from Amazon. It has two main plans: Prime Music and Music Unlimited. Prime Music is included with Amazon Prime membership and gives you access to over 2 million songs, playlists, and stations. Music Unlimited is a separate subscription that gives you access to over 70 million songs, podcasts, and exclusive content. Amazon Music also allows you to upload up to 250 songs from your device or computer to the cloud. Amazon Music also has an offline mode that lets you download songs to your device.

    -

    Apple Music

    -

    Apple Music is the music streaming service and app music player from Apple. It has a library of over 75 million songs, podcasts, and playlists that you can access with a monthly or annual subscription. Apple Music also has exclusive content and live radio stations from around the world. You can also sync your iTunes library and play your own music files on the app. Apple Music also has an offline mode that lets you download up to 100,000 songs per device.

    -

    How to Download and Install App Music Players for Android

    -

    Downloading and installing app music players for Android is easy and fast. Here are the steps you need to follow:

    -

    Step 1: Go to the Google Play Store and search for the app music player you want to download.

    -

    The Google Play Store is the official app store for Android devices where you can find and download millions of apps, games, books, movies, etc. To access the Google Play Store, you need to have a Google account and an internet connection. You can open the Google Play Store by tapping on its icon on your home screen or app drawer.

    -

    Once you open the Google Play Store, you can use the search bar at the top to type in the name of the app music player you want to download. For example, if you want to download Spotify, you can type in "Spotify" and tap on the search icon.

    -

    Step 2: Tap on the app icon and then tap on the Install button.

    -

    After you search for the app music player you want to download, you will see a list of results that match your query. Tap on the app icon that corresponds to the app music player you want to download. For example, if you want to download Spotify, tap on the Spotify icon that has a green circle with three white lines.

    -

    This will take you to the app page where you can see more information about the app, such as its description, rating, reviews, screenshots, etc. To download and install the app on your device, tap on the Install button that is usually located at the top right corner of the screen.

    -

    Step 3: Wait for the app to download and install on your device.

    -

    After you tap on the Install button, the app will start downloading and installing on your device automatically. You can see the progress of the download and installation by looking at the status bar at the top of the screen or by going back to the Google Play Store and tapping on the My apps & games tab. You can also cancel the download and installation by tapping on the X button next to the app icon.

    -

    The time it takes for the app to download and install depends on the size of the app, the speed of your internet connection, and the performance of your device. Usually, it takes a few minutes or less for most app music players to download and install.

    -

    Step 4: Open the app and sign in with your account or create a new one if needed.

    -

    After the app is downloaded and installed on your device, you can open it by tapping on its icon on your home screen or app drawer. Alternatively, you can also open it from the Google Play Store by tapping on the Open button on the app page.

    -

    When you open the app for the first time, you may need to sign in with your account or create a new one if you don't have one already. Some app music players require you to have an account to use their features, while others allow you to use them without an account. For example, Spotify requires you to have a Spotify account or a Facebook account to sign in, while YouTube Music allows you to use it as a guest without signing in.

    -

    To sign in with your account or create a new one, follow the instructions on the app screen. You may need to provide some information, such as your email address, password, username, etc. You may also need to agree to some terms and conditions and privacy policies before you can use the app.

    -

    How to Use App Music Players for Android

    -

    Once you have downloaded and installed an app music player for Android and signed in with your account, you can start using it to listen to music on your device. Here are some tips on how to use app music players for Android:

    -

    How to browse and play music from different sources

    -

    One of the main features of app music players is that they allow you to browse and play music from different sources, such as streaming services, local files, podcasts, etc. To browse and play music from different sources, you can use the app interface, which usually consists of tabs, menus, buttons, icons, etc. For example, Spotify has five tabs at the bottom of the screen: Home, Browse, Search, Library, and Premium. You can tap on each tab to access different sections of the app.

    -

    To play music from a source, you can tap on it and then tap on the play button or the song title. You can also swipe left or right to skip or go back to the previous or next song. You can also pause or resume the playback by tapping on the pause or play button at the bottom of the screen. You can also control the playback using your device's volume buttons or notification panel.

    -

    How to create and manage playlists, favorites, and downloads

    -

    Another feature of app music players is that they allow you to create and manage playlists, favorites, and downloads of your music. Playlists are collections of songs that you can create and organize according to your preferences. Favorites are songs that you like and want to save for easy access. Downloads are songs that you download to your device for offline playback.

    -

    To create and manage playlists, favorites, and downloads, you can use the app interface, which usually has options for adding, removing, editing, sorting, etc. For example, Spotify has a Library tab where you can access your playlists, favorites, and downloads. You can tap on the plus icon to create a new playlist, or tap on an existing playlist to edit it. You can also tap on the heart icon to add or remove a song from your favorites, or tap on the download icon to download or delete a song from your device.

    -

    How to adjust the settings, such as sound quality, equalizer, theme, etc.

    -

    A third feature of app music players is that they allow you to adjust the settings, such as sound quality, equalizer, theme, etc. to suit your preferences and needs. Settings are options that you can change to modify the behavior and appearance of the app. For example, Spotify has a Settings tab where you can access various settings, such as streaming quality, offline mode, equalizer, notifications, language, etc.

    -

    To adjust the settings, you can use the app interface, which usually has a menu or a button that leads you to the settings page. For example, Spotify has a gear icon at the top right corner of the screen that takes you to the Settings tab. You can tap on each setting to change its value or toggle it on or off.

    -

    Conclusion

    -

    App music players are software applications that allow you to play various types of audio files on your Android device. They can offer you many benefits, such as access to a variety of music sources, customization options, and offline mode. They can also enhance your listening experience and enjoyment of music.

    -

    In this article, we have explained what app music players are and why they are useful for listening to music on the go. We have also reviewed some of the best app music players for Android that you can download from the Google Play Store, such as Spotify, YouTube Music, Amazon Music, and Apple Music. We have also shown you how to download and install app music players for Android and how to use them effectively.

    -

    We hope that this article has helped you learn more about app music players for Android and how to download and use them. If you have any questions or feedback, please feel free to contact us or leave a comment below. Thank you for reading and happy listening!

    -

    FAQs

    -

    Here are some frequently asked questions about app music players for Android:

    -

    How can I transfer music files from my PC to my phone?

    -

    There are several ways to transfer music files from your PC to your phone. One of the easiest ways is to use a USB cable and connect your phone to your PC. Then, you can drag and drop the music files from your PC folder to your phone folder. Another way is to use a cloud service like Google Drive or Dropbox and upload your music files from your PC to the cloud. Then, you can download them from the cloud to your phone using the app or browser.

    -

    How can I sync music across my devices?

    -

If you want to sync music across your devices, such as your phone, tablet, and laptop, you need to use an app music player that supports cross-device synchronization. For example, Spotify allows you to sync your music across your devices using your Spotify account. You can also use Spotify Connect to control playback on one device from another.

    -

    How can I fix common issues with app music players?

    -

If you encounter any issues with app music players, such as crashes, errors, glitches, etc., you can try some of these troubleshooting steps:

• Restart the app or your device.
• Check your internet connection and make sure it is stable and fast.
• Update the app or your device software to the latest version.
• Clear the app cache and data, or reinstall the app.
• Contact the app developer or customer support for help.

    -
    -
    \ No newline at end of file diff --git a/spaces/fffiloni/train-dreambooth-lora-sdxl/train_dreambooth_lora_sdxl.py b/spaces/fffiloni/train-dreambooth-lora-sdxl/train_dreambooth_lora_sdxl.py deleted file mode 100644 index abd0bf3f9c99be2372335a6ddefd15443669cde4..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/train-dreambooth-lora-sdxl/train_dreambooth_lora_sdxl.py +++ /dev/null @@ -1,1508 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and - -import argparse -import gc -import hashlib -import itertools -import logging -import math -import os -import shutil -import warnings -from pathlib import Path -from typing import Dict - -import numpy as np -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -import transformers -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import ProjectConfiguration, set_seed -from huggingface_hub import create_repo, upload_folder -from packaging import version -from PIL import Image -from PIL.ImageOps import exif_transpose -from torch.utils.data import Dataset -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import AutoTokenizer, PretrainedConfig - -import diffusers -from diffusers import ( - AutoencoderKL, - DDPMScheduler, - DPMSolverMultistepScheduler, - StableDiffusionXLPipeline, - UNet2DConditionModel, -) -from diffusers.loaders import LoraLoaderMixin, text_encoder_lora_state_dict -from diffusers.models.attention_processor import LoRAAttnProcessor, LoRAAttnProcessor2_0 -from diffusers.optimization import get_scheduler -from diffusers.utils import check_min_version, is_wandb_available -from diffusers.utils.import_utils import is_xformers_available - - -# Will error if the minimal version of diffusers is not installed. Remove at your own risks. -check_min_version("0.22.0.dev0") - -logger = get_logger(__name__) - -def save_tempo_model_card( - repo_id: str, dataset_id=str, base_model=str, train_text_encoder=False, prompt=str, repo_folder=None, vae_path=None, last_checkpoint=str -): - - yaml = f""" ---- -base_model: {base_model} -instance_prompt: {prompt} -tags: -- stable-diffusion-xl -- stable-diffusion-xl-diffusers -- text-to-image -- diffusers -- lora -inference: false -datasets: -- {dataset_id} ---- - """ - model_card = f""" -# LoRA DreamBooth - {repo_id} -## MODEL IS CURRENTLY TRAINING ... -Last checkpoint saved: {last_checkpoint} -These are LoRA adaption weights for {base_model} trained on @fffiloni's SD-XL trainer. -The weights were trained on the concept prompt: -``` -{prompt} -``` -Use this keyword to trigger your custom model in your prompts. -LoRA for the text encoder was enabled: {train_text_encoder}. -Special VAE used for training: {vae_path}. 
-## Usage -Make sure to upgrade diffusers to >= 0.19.0: -``` -pip install diffusers --upgrade -``` -In addition make sure to install transformers, safetensors, accelerate as well as the invisible watermark: -``` -pip install invisible_watermark transformers accelerate safetensors -``` -To just use the base model, you can run: -```python -import torch -from diffusers import DiffusionPipeline, AutoencoderKL -device = "cuda" if torch.cuda.is_available() else "cpu" -vae = AutoencoderKL.from_pretrained('{vae_path}', torch_dtype=torch.float16) -pipe = DiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", - vae=vae, torch_dtype=torch.float16, variant="fp16", - use_safetensors=True -) -pipe.to(device) -# This is where you load your trained weights -specific_safetensors = "pytorch_lora_weights.safetensors" -lora_scale = 0.9 -pipe.load_lora_weights( - '{repo_id}', - weight_name = specific_safetensors, - # use_auth_token = True -) -prompt = "A majestic {prompt} jumping from a big stone at night" -image = pipe( - prompt=prompt, - num_inference_steps=50, - cross_attention_kwargs={{"scale": lora_scale}} -).images[0] -``` -""" - with open(os.path.join(repo_folder, "README.md"), "w") as f: - f.write(yaml + model_card) - -def save_model_card( - repo_id: str, images=None, dataset_id=str, base_model=str, train_text_encoder=False, prompt=str, repo_folder=None, vae_path=None -): - img_str = "" - for i, image in enumerate(images): - image.save(os.path.join(repo_folder, f"image_{i}.png")) - img_str += f"![img_{i}](./image_{i}.png)\n" - - yaml = f""" ---- -base_model: {base_model} -instance_prompt: {prompt} -tags: -- stable-diffusion-xl -- stable-diffusion-xl-diffusers -- text-to-image -- diffusers -- lora -inference: false -datasets: -- {dataset_id} ---- - """ - model_card = f""" -# LoRA DreamBooth - {repo_id} -These are LoRA adaption weights for {base_model} trained on @fffiloni's SD-XL trainer. -The weights were trained on the concept prompt: -``` -{prompt} -``` -Use this keyword to trigger your custom model in your prompts. -LoRA for the text encoder was enabled: {train_text_encoder}. -Special VAE used for training: {vae_path}. 
-## Usage -Make sure to upgrade diffusers to >= 0.19.0: -``` -pip install diffusers --upgrade -``` -In addition make sure to install transformers, safetensors, accelerate as well as the invisible watermark: -``` -pip install invisible_watermark transformers accelerate safetensors -``` -To just use the base model, you can run: -```python -import torch -from diffusers import DiffusionPipeline, AutoencoderKL -device = "cuda" if torch.cuda.is_available() else "cpu" -vae = AutoencoderKL.from_pretrained('{vae_path}', torch_dtype=torch.float16) -pipe = DiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", - vae=vae, torch_dtype=torch.float16, variant="fp16", - use_safetensors=True -) -pipe.to(device) -# This is where you load your trained weights -specific_safetensors = "pytorch_lora_weights.safetensors" -lora_scale = 0.9 -pipe.load_lora_weights( - '{repo_id}', - weight_name = specific_safetensors, - # use_auth_token = True -) -prompt = "A majestic {prompt} jumping from a big stone at night" -image = pipe( - prompt=prompt, - num_inference_steps=50, - cross_attention_kwargs={{"scale": lora_scale}} -).images[0] -``` -""" - with open(os.path.join(repo_folder, "README.md"), "w") as f: - f.write(yaml + model_card) - - -def import_model_class_from_model_name_or_path( - pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" -): - text_encoder_config = PretrainedConfig.from_pretrained( - pretrained_model_name_or_path, subfolder=subfolder, revision=revision - ) - model_class = text_encoder_config.architectures[0] - - if model_class == "CLIPTextModel": - from transformers import CLIPTextModel - - return CLIPTextModel - elif model_class == "CLIPTextModelWithProjection": - from transformers import CLIPTextModelWithProjection - - return CLIPTextModelWithProjection - else: - raise ValueError(f"{model_class} is not supported.") - - -def parse_args(input_args=None): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--pretrained_vae_model_name_or_path", - type=str, - default=None, - help="Path to pretrained VAE model with better numerical stability. 
More details: https://github.com/huggingface/diffusers/pull/4038.", - ) - parser.add_argument( - "--revision", - type=str, - default=None, - required=False, - help="Revision of pretrained model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--dataset_id", - type=str, - default=None, - required=True, - help="The dataset ID you want to train images from", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - required=True, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default=None, - help="The prompt to specify images in the same class as provided instance images.", - ) - parser.add_argument( - "--validation_prompt", - type=str, - default=None, - help="A prompt that is used during validation to verify that the model is learning.", - ) - parser.add_argument( - "--num_validation_images", - type=int, - default=4, - help="Number of images that should be generated during validation with `validation_prompt`.", - ) - parser.add_argument( - "--validation_epochs", - type=int, - default=50, - help=( - "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt" - " `args.validation_prompt` multiple times: `args.num_validation_images`." - ), - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior preservation loss. If there are not enough images already present in" - " class_data_dir, additional images will be sampled with class_prompt." - ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="lora-dreambooth-model", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=1024, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--crops_coords_top_left_h", - type=int, - default=0, - help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."), - ) - parser.add_argument( - "--crops_coords_top_left_w", - type=int, - default=0, - help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."), - ) - parser.add_argument( - "--center_crop", - default=False, - action="store_true", - help=( - "Whether to center crop the input images to the resolution. If not set, the images will be randomly" - " cropped. The images will be resized to the resolution first before cropping." - ), - ) - parser.add_argument( - "--train_text_encoder", - action="store_true", - help="Whether to train the text encoder. 
If set, the text encoder should be float32 precision.", - ) - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument( - "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--checkpointing_steps", - type=int, - default=500, - help=( - "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" - " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" - " training using `--resume_from_checkpoint`." - ), - ) - parser.add_argument( - "--checkpoints_total_limit", - type=int, - default=None, - help=("Max number of checkpoints to store."), - ) - parser.add_argument( - "--resume_from_checkpoint", - type=str, - default=None, - help=( - "Whether training should be resumed from a previous checkpoint. Use a path saved by" - ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' - ), - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-4, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--lr_num_cycles", - type=int, - default=1, - help="Number of hard resets of the lr in cosine_with_restarts scheduler.", - ) - parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") - parser.add_argument( - "--dataloader_num_workers", - type=int, - default=0, - help=( - "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." - ), - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
- ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--allow_tf32", - action="store_true", - help=( - "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" - " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" - ), - ) - parser.add_argument( - "--report_to", - type=str, - default="tensorboard", - help=( - 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' - ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default=None, - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" - " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" - " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." - ), - ) - parser.add_argument( - "--prior_generation_precision", - type=str, - default=None, - choices=["no", "fp32", "fp16", "bf16"], - help=( - "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" - " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - parser.add_argument( - "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
- ) - parser.add_argument( - "--rank", - type=int, - default=4, - help=("The dimension of the LoRA update matrices."), - ) - - if input_args is not None: - args = parser.parse_args(input_args) - else: - args = parser.parse_args() - - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - if args.with_prior_preservation: - if args.class_data_dir is None: - raise ValueError("You must specify a data directory for class images.") - if args.class_prompt is None: - raise ValueError("You must specify prompt for class images.") - else: - # logger is not available yet - if args.class_data_dir is not None: - warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") - if args.class_prompt is not None: - warnings.warn("You need not use --class_prompt without --with_prior_preservation.") - - return args - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images. - """ - - def __init__( - self, - instance_data_root, - class_data_root=None, - class_num=None, - size=1024, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exists.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self._length = self.num_instance_images - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - if class_num is not None: - self.num_class_images = min(len(self.class_images_path), class_num) - else: - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) - instance_image = exif_transpose(instance_image) - - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - example["instance_images"] = self.image_transforms(instance_image) - - if self.class_data_root: - class_image = Image.open(self.class_images_path[index % self.num_class_images]) - class_image = exif_transpose(class_image) - - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - - return example - - -def collate_fn(examples, with_prior_preservation=False): - pixel_values = [example["instance_images"] for example in examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. 
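As the comment above notes, instance and class images are stacked into a single batch so that prior preservation costs only one UNet forward pass; the two halves are split back apart with `torch.chunk` when the loss is computed later in the script. A minimal standalone sketch of that pattern (shapes are illustrative, not tied to SDXL):

```python
import torch

# Pretend batch: 2 instance images and 2 class images, stacked along dim 0.
instance = torch.randn(2, 3, 64, 64)
class_imgs = torch.randn(2, 3, 64, 64)
pixel_values = torch.cat([instance, class_imgs], dim=0)  # shape (4, 3, 64, 64)

# One "forward pass" stands in for the UNet call.
model_pred = pixel_values * 0.5

# Split the joint prediction back into instance and prior halves.
pred_instance, pred_prior = torch.chunk(model_pred, 2, dim=0)
assert pred_instance.shape == instance.shape
```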
- if with_prior_preservation: - pixel_values += [example["class_images"] for example in examples] - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - batch = {"pixel_values": pixel_values} - return batch - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." - - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - - -def tokenize_prompt(tokenizer, prompt): - text_inputs = tokenizer( - prompt, - padding="max_length", - max_length=tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - return text_input_ids - - -# Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt -def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None): - prompt_embeds_list = [] - - for i, text_encoder in enumerate(text_encoders): - if tokenizers is not None: - tokenizer = tokenizers[i] - text_input_ids = tokenize_prompt(tokenizer, prompt) - else: - assert text_input_ids_list is not None - text_input_ids = text_input_ids_list[i] - - prompt_embeds = text_encoder( - text_input_ids.to(text_encoder.device), - output_hidden_states=True, - ) - - # We are only ALWAYS interested in the pooled output of the final text encoder - pooled_prompt_embeds = prompt_embeds[0] - prompt_embeds = prompt_embeds.hidden_states[-2] - bs_embed, seq_len, _ = prompt_embeds.shape - prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) - prompt_embeds_list.append(prompt_embeds) - - prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) - pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) - return prompt_embeds, pooled_prompt_embeds - - -def unet_attn_processors_state_dict(unet) -> Dict[str, torch.tensor]: - """ - Returns: - a state dict containing just the attention processor parameters. - """ - attn_processors = unet.attn_processors - - attn_processors_state_dict = {} - - for attn_processor_key, attn_processor in attn_processors.items(): - for parameter_key, parameter in attn_processor.state_dict().items(): - attn_processors_state_dict[f"{attn_processor_key}.{parameter_key}"] = parameter - - return attn_processors_state_dict - - -def main(args): - logging_dir = Path(args.output_dir, args.logging_dir) - - accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) - - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with=args.report_to, - project_config=accelerator_project_config, - ) - - if args.report_to == "wandb": - if not is_wandb_available(): - raise ImportError("Make sure to install wandb if you want to use it for logging during training.") - import wandb - - # Make one log on every process with the configuration for debugging. 
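For reference, `encode_prompt` above concatenates the penultimate hidden states of SDXL's two text encoders along the feature dimension and keeps only the second encoder's pooled output. A toy sketch with dummy tensors; the 768/1280 hidden widths are an assumption matching the SDXL base checkpoint (CLIP ViT-L and OpenCLIP bigG):

```python
import torch

bs, seq_len = 1, 77
hidden_one = torch.randn(bs, seq_len, 768)   # text_encoder_one, hidden_states[-2]
hidden_two = torch.randn(bs, seq_len, 1280)  # text_encoder_two, hidden_states[-2]

# Concatenate along the feature dimension, as encode_prompt does.
prompt_embeds = torch.concat([hidden_one, hidden_two], dim=-1)
print(prompt_embeds.shape)  # torch.Size([1, 77, 2048])

# Only the pooled output of the final (second) text encoder is kept.
pooled_prompt_embeds = torch.randn(bs, 1280).view(bs, -1)
```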
- logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state, main_process_only=False) - if accelerator.is_local_main_process: - transformers.utils.logging.set_verbosity_warning() - diffusers.utils.logging.set_verbosity_info() - else: - transformers.utils.logging.set_verbosity_error() - diffusers.utils.logging.set_verbosity_error() - - # If passed along, set the training seed now. - if args.seed is not None: - set_seed(args.seed) - - # Generate class images if prior preservation is enabled. - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 - if args.prior_generation_precision == "fp32": - torch_dtype = torch.float32 - elif args.prior_generation_precision == "fp16": - torch_dtype = torch.float16 - elif args.prior_generation_precision == "bf16": - torch_dtype = torch.bfloat16 - pipeline = StableDiffusionXLPipeline.from_pretrained( - args.pretrained_model_name_or_path, - torch_dtype=torch_dtype, - revision=args.revision, - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - - for example in tqdm( - sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process - ): - images = pipeline(example["prompt"]).images - - for i, image in enumerate(images): - hash_image = hashlib.sha1(image.tobytes()).hexdigest() - image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" - image.save(image_filename) - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - if args.push_to_hub: - repo_id = create_repo( - repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, private=True, token=args.hub_token - ).repo_id - - # Load the tokenizers - tokenizer_one = AutoTokenizer.from_pretrained( - args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False - ) - tokenizer_two = AutoTokenizer.from_pretrained( - args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False - ) - - # import correct text encoder classes - text_encoder_cls_one = import_model_class_from_model_name_or_path( - args.pretrained_model_name_or_path, args.revision - ) - text_encoder_cls_two = import_model_class_from_model_name_or_path( - args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" - ) - - # Load scheduler and models - noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") - text_encoder_one = text_encoder_cls_one.from_pretrained( - args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision - ) - 
text_encoder_two = text_encoder_cls_two.from_pretrained( - args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision - ) - vae_path = ( - args.pretrained_model_name_or_path - if args.pretrained_vae_model_name_or_path is None - else args.pretrained_vae_model_name_or_path - ) - vae = AutoencoderKL.from_pretrained( - vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision - ) - unet = UNet2DConditionModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision - ) - - # We only train the additional adapter LoRA layers - vae.requires_grad_(False) - text_encoder_one.requires_grad_(False) - text_encoder_two.requires_grad_(False) - unet.requires_grad_(False) - - # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision - # as these weights are only used for inference, keeping weights in full precision is not required. - weight_dtype = torch.float32 - if accelerator.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif accelerator.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move unet, vae and text_encoder to device and cast to weight_dtype - unet.to(accelerator.device, dtype=weight_dtype) - - # The VAE is always in float32 to avoid NaN losses. - vae.to(accelerator.device, dtype=torch.float32) - - text_encoder_one.to(accelerator.device, dtype=weight_dtype) - text_encoder_two.to(accelerator.device, dtype=weight_dtype) - - if args.enable_xformers_memory_efficient_attention: - if is_xformers_available(): - import xformers - - xformers_version = version.parse(xformers.__version__) - if xformers_version == version.parse("0.0.16"): - logger.warn( - "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." - ) - unet.enable_xformers_memory_efficient_attention() - else: - raise ValueError("xformers is not available. Make sure it is installed correctly") - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - if args.train_text_encoder: - text_encoder_one.gradient_checkpointing_enable() - text_encoder_two.gradient_checkpointing_enable() - - # now we will add new LoRA weights to the attention layers - # Set correct lora layers - unet_lora_attn_procs = {} - unet_lora_parameters = [] - for name, attn_processor in unet.attn_processors.items(): - cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim - if name.startswith("mid_block"): - hidden_size = unet.config.block_out_channels[-1] - elif name.startswith("up_blocks"): - block_id = int(name[len("up_blocks.")]) - hidden_size = list(reversed(unet.config.block_out_channels))[block_id] - elif name.startswith("down_blocks"): - block_id = int(name[len("down_blocks.")]) - hidden_size = unet.config.block_out_channels[block_id] - - lora_attn_processor_class = ( - LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor - ) - module = lora_attn_processor_class( - hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=args.rank - ) - unet_lora_attn_procs[name] = module - unet_lora_parameters.extend(module.parameters()) - - unet.set_attn_processor(unet_lora_attn_procs) - - # The text encoder comes from 🤗 transformers, so we cannot directly modify it. 
- # So, instead, we monkey-patch the forward calls of its attention-blocks. - if args.train_text_encoder: - # ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16 - text_lora_parameters_one = LoraLoaderMixin._modify_text_encoder( - text_encoder_one, dtype=torch.float32, rank=args.rank - ) - text_lora_parameters_two = LoraLoaderMixin._modify_text_encoder( - text_encoder_two, dtype=torch.float32, rank=args.rank - ) - - # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format - def save_model_hook(models, weights, output_dir): - if accelerator.is_main_process: - # there are only two options here. Either are just the unet attn processor layers - # or there are the unet and text encoder atten layers - unet_lora_layers_to_save = None - text_encoder_one_lora_layers_to_save = None - text_encoder_two_lora_layers_to_save = None - - for model in models: - if isinstance(model, type(accelerator.unwrap_model(unet))): - unet_lora_layers_to_save = unet_attn_processors_state_dict(model) - elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))): - text_encoder_one_lora_layers_to_save = text_encoder_lora_state_dict(model) - elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))): - text_encoder_two_lora_layers_to_save = text_encoder_lora_state_dict(model) - else: - raise ValueError(f"unexpected save model: {model.__class__}") - - # make sure to pop weight so that corresponding model is not saved again - weights.pop() - - StableDiffusionXLPipeline.save_lora_weights( - output_dir, - unet_lora_layers=unet_lora_layers_to_save, - text_encoder_lora_layers=text_encoder_one_lora_layers_to_save, - text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save, - ) - - def load_model_hook(models, input_dir): - unet_ = None - text_encoder_one_ = None - text_encoder_two_ = None - - while len(models) > 0: - model = models.pop() - - if isinstance(model, type(accelerator.unwrap_model(unet))): - unet_ = model - elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))): - text_encoder_one_ = model - elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))): - text_encoder_two_ = model - else: - raise ValueError(f"unexpected save model: {model.__class__}") - - lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir) - LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_) - - text_encoder_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder." in k} - LoraLoaderMixin.load_lora_into_text_encoder( - text_encoder_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_one_ - ) - - text_encoder_2_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder_2." 
in k} - LoraLoaderMixin.load_lora_into_text_encoder( - text_encoder_2_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_two_ - ) - - accelerator.register_save_state_pre_hook(save_model_hook) - accelerator.register_load_state_pre_hook(load_model_hook) - - # Enable TF32 for faster training on Ampere GPUs, - # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices - if args.allow_tf32: - torch.backends.cuda.matmul.allow_tf32 = True - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - # Optimizer creation - params_to_optimize = ( - itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two) - if args.train_text_encoder - else unet_lora_parameters - ) - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - # Computes additional embeddings/ids required by the SDXL UNet. - # regular text emebddings (when `train_text_encoder` is not True) - # pooled text embeddings - # time ids - - def compute_time_ids(): - # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids - original_size = (args.resolution, args.resolution) - target_size = (args.resolution, args.resolution) - crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w) - add_time_ids = list(original_size + crops_coords_top_left + target_size) - add_time_ids = torch.tensor([add_time_ids]) - add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype) - return add_time_ids - - if not args.train_text_encoder: - tokenizers = [tokenizer_one, tokenizer_two] - text_encoders = [text_encoder_one, text_encoder_two] - - def compute_text_embeddings(prompt, text_encoders, tokenizers): - with torch.no_grad(): - prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt) - prompt_embeds = prompt_embeds.to(accelerator.device) - pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device) - return prompt_embeds, pooled_prompt_embeds - - # Handle instance prompt. - instance_time_ids = compute_time_ids() - if not args.train_text_encoder: - instance_prompt_hidden_states, instance_pooled_prompt_embeds = compute_text_embeddings( - args.instance_prompt, text_encoders, tokenizers - ) - - # Handle class prompt for prior-preservation. - if args.with_prior_preservation: - class_time_ids = compute_time_ids() - if not args.train_text_encoder: - class_prompt_hidden_states, class_pooled_prompt_embeds = compute_text_embeddings( - args.class_prompt, text_encoders, tokenizers - ) - - # Clear the memory here. - if not args.train_text_encoder: - del tokenizers, text_encoders - gc.collect() - torch.cuda.empty_cache() - - # Pack the statically computed variables appropriately. This is so that we don't - # have to pass them to the dataloader. 
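`compute_time_ids` above packs SDXL's micro-conditioning into six integers: (original_h, original_w, crop_top, crop_left, target_h, target_w). A sketch of what it returns with this script's default arguments (not part of the training script):

```python
import torch

original_size = (1024, 1024)          # args.resolution defaults
crops_coords_top_left = (0, 0)        # crop offset defaults
target_size = (1024, 1024)

add_time_ids = torch.tensor([list(original_size + crops_coords_top_left + target_size)])
print(add_time_ids)        # tensor([[1024, 1024, 0, 0, 1024, 1024]])
print(add_time_ids.shape)  # torch.Size([1, 6])
```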
- add_time_ids = instance_time_ids - if args.with_prior_preservation: - add_time_ids = torch.cat([add_time_ids, class_time_ids], dim=0) - - if not args.train_text_encoder: - prompt_embeds = instance_prompt_hidden_states - unet_add_text_embeds = instance_pooled_prompt_embeds - if args.with_prior_preservation: - prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0) - unet_add_text_embeds = torch.cat([unet_add_text_embeds, class_pooled_prompt_embeds], dim=0) - else: - tokens_one = tokenize_prompt(tokenizer_one, args.instance_prompt) - tokens_two = tokenize_prompt(tokenizer_two, args.instance_prompt) - if args.with_prior_preservation: - class_tokens_one = tokenize_prompt(tokenizer_one, args.class_prompt) - class_tokens_two = tokenize_prompt(tokenizer_two, args.class_prompt) - tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0) - tokens_two = torch.cat([tokens_two, class_tokens_two], dim=0) - - # Dataset and DataLoaders creation: - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_num=args.num_class_images, - size=args.resolution, - center_crop=args.center_crop, - ) - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, - batch_size=args.train_batch_size, - shuffle=True, - collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), - num_workers=args.dataloader_num_workers, - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, - num_training_steps=args.max_train_steps * accelerator.num_processes, - num_cycles=args.lr_num_cycles, - power=args.lr_power, - ) - - # Prepare everything with our `accelerator`. - if args.train_text_encoder: - unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler - ) - else: - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("dreambooth-lora-sd-xl", config=vars(args)) - - # Train! 
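The step bookkeeping above is easy to misread, so here is the arithmetic with made-up numbers (a worked example, not part of the script):

```python
import math

num_batches_per_epoch = 25        # len(train_dataloader)
gradient_accumulation_steps = 4
train_batch_size = 4              # per device
num_processes = 2                 # GPUs
num_train_epochs = 3

# One optimizer update happens every `gradient_accumulation_steps` batches.
num_update_steps_per_epoch = math.ceil(num_batches_per_epoch / gradient_accumulation_steps)  # 7
max_train_steps = num_train_epochs * num_update_steps_per_epoch                               # 21
total_batch_size = train_batch_size * num_processes * gradient_accumulation_steps             # 32
```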
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - global_step = 0 - first_epoch = 0 - - # Potentially load in the weights and states from a previous save - if args.resume_from_checkpoint: - if args.resume_from_checkpoint != "latest": - path = os.path.basename(args.resume_from_checkpoint) - else: - # Get the mos recent checkpoint - dirs = os.listdir(args.output_dir) - dirs = [d for d in dirs if d.startswith("checkpoint")] - dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) - path = dirs[-1] if len(dirs) > 0 else None - - if path is None: - accelerator.print( - f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." - ) - args.resume_from_checkpoint = None - initial_global_step = 0 - else: - accelerator.print(f"Resuming from checkpoint {path}") - accelerator.load_state(os.path.join(args.output_dir, path)) - global_step = int(path.split("-")[1]) - - initial_global_step = global_step - first_epoch = global_step // num_update_steps_per_epoch - - else: - initial_global_step = 0 - - progress_bar = tqdm( - range(0, args.max_train_steps), - initial=initial_global_step, - desc="Steps", - # Only show the progress bar once on each machine. - disable=not accelerator.is_local_main_process, - ) - - for epoch in range(first_epoch, args.num_train_epochs): - # Print a message for each epoch - print(f"Epoch {epoch}: Training in progress...") - unet.train() - if args.train_text_encoder: - text_encoder_one.train() - text_encoder_two.train() - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(unet): - pixel_values = batch["pixel_values"].to(dtype=vae.dtype) - - # Convert images to latent space - model_input = vae.encode(pixel_values).latent_dist.sample() - model_input = model_input * vae.config.scaling_factor - if args.pretrained_vae_model_name_or_path is None: - model_input = model_input.to(weight_dtype) - - # Sample noise that we'll add to the latents - noise = torch.randn_like(model_input) - bsz = model_input.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint( - 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device - ) - timesteps = timesteps.long() - - # Add noise to the model input according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps) - - # Calculate the elements to repeat depending on the use of prior-preservation. 
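The noise injection a few lines up is the standard forward-diffusion identity; a sketch of what `add_noise` computes for the epsilon objective, using an illustrative cumulative alpha rather than the scheduler's table:

```python
import torch

# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
x0 = torch.randn(1, 4, 128, 128)   # VAE latents
eps = torch.randn_like(x0)
alpha_bar_t = torch.tensor(0.7)    # cumulative alpha at the sampled timestep (dummy value)

noisy_model_input = alpha_bar_t.sqrt() * x0 + (1 - alpha_bar_t).sqrt() * eps
```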
- elems_to_repeat = bsz // 2 if args.with_prior_preservation else bsz - - # Predict the noise residual - if not args.train_text_encoder: - unet_added_conditions = { - "time_ids": add_time_ids.repeat(elems_to_repeat, 1), - "text_embeds": unet_add_text_embeds.repeat(elems_to_repeat, 1), - } - prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat, 1, 1) - model_pred = unet( - noisy_model_input, - timesteps, - prompt_embeds_input, - added_cond_kwargs=unet_added_conditions, - ).sample - else: - unet_added_conditions = {"time_ids": add_time_ids.repeat(elems_to_repeat, 1)} - prompt_embeds, pooled_prompt_embeds = encode_prompt( - text_encoders=[text_encoder_one, text_encoder_two], - tokenizers=None, - prompt=None, - text_input_ids_list=[tokens_one, tokens_two], - ) - unet_added_conditions.update({"text_embeds": pooled_prompt_embeds.repeat(elems_to_repeat, 1)}) - prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat, 1, 1) - model_pred = unet( - noisy_model_input, timesteps, prompt_embeds_input, added_cond_kwargs=unet_added_conditions - ).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(model_input, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - if args.with_prior_preservation: - # Chunk the noise and model_pred into two parts and compute the loss on each part separately. - model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) - target, target_prior = torch.chunk(target, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - # Compute prior loss - prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. 
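For the `v_prediction` branch above, `get_velocity` returns the usual velocity target; a sketch under the standard DDPM parameterization, with dummy tensors:

```python
import torch

# v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
x0 = torch.randn(2, 4, 128, 128)
eps = torch.randn_like(x0)
alpha_bar_t = torch.tensor(0.7)    # illustrative; a real scheduler indexes per timestep

v_target = alpha_bar_t.sqrt() * eps - (1 - alpha_bar_t).sqrt() * x0
```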
- loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = ( - itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two) - if args.train_text_encoder - else unet_lora_parameters - ) - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - # Print a message for each step - print(f"Step {global_step}/{args.max_train_steps}: Done") - progress_bar.update(1) - global_step += 1 - - if accelerator.is_main_process: - if global_step % args.checkpointing_steps == 0: - # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` - if args.checkpoints_total_limit is not None: - checkpoints = os.listdir(args.output_dir) - checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] - checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) - - # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints - if len(checkpoints) >= args.checkpoints_total_limit: - num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 - removing_checkpoints = checkpoints[0:num_to_remove] - - logger.info( - f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" - ) - logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") - - for removing_checkpoint in removing_checkpoints: - removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) - shutil.rmtree(removing_checkpoint) - - save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") - accelerator.save_state(save_path) - logger.info(f"Saved state to {save_path}") - - save_tempo_model_card( - repo_id, - dataset_id=args.dataset_id, - base_model=args.pretrained_model_name_or_path, - train_text_encoder=args.train_text_encoder, - prompt=args.instance_prompt, - repo_folder=args.output_dir, - vae_path=args.pretrained_vae_model_name_or_path, - last_checkpoint = f"checkpoint-{global_step}" - ) - - upload_folder( - repo_id=repo_id, - folder_path=args.output_dir, - commit_message=f"saving checkpoint-{global_step}", - ignore_patterns=["step_*", "epoch_*"], - token=args.hub_token - ) - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - if accelerator.is_main_process: - if args.validation_prompt is not None and epoch % args.validation_epochs == 0: - logger.info( - f"Running validation... \n Generating {args.num_validation_images} images with prompt:" - f" {args.validation_prompt}." 
- ) - # create pipeline - if not args.train_text_encoder: - text_encoder_one = text_encoder_cls_one.from_pretrained( - args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision - ) - text_encoder_two = text_encoder_cls_two.from_pretrained( - args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision - ) - pipeline = StableDiffusionXLPipeline.from_pretrained( - args.pretrained_model_name_or_path, - vae=vae, - text_encoder=accelerator.unwrap_model(text_encoder_one), - text_encoder_2=accelerator.unwrap_model(text_encoder_two), - unet=accelerator.unwrap_model(unet), - revision=args.revision, - torch_dtype=weight_dtype, - ) - - # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it - scheduler_args = {} - - if "variance_type" in pipeline.scheduler.config: - variance_type = pipeline.scheduler.config.variance_type - - if variance_type in ["learned", "learned_range"]: - variance_type = "fixed_small" - - scheduler_args["variance_type"] = variance_type - - pipeline.scheduler = DPMSolverMultistepScheduler.from_config( - pipeline.scheduler.config, **scheduler_args - ) - - pipeline = pipeline.to(accelerator.device) - pipeline.set_progress_bar_config(disable=True) - - # run inference - generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None - pipeline_args = {"prompt": args.validation_prompt} - - with torch.cuda.amp.autocast(): - images = [ - pipeline(**pipeline_args, generator=generator).images[0] - for _ in range(args.num_validation_images) - ] - - for tracker in accelerator.trackers: - if tracker.name == "tensorboard": - np_images = np.stack([np.asarray(img) for img in images]) - tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") - if tracker.name == "wandb": - tracker.log( - { - "validation": [ - wandb.Image(image, caption=f"{i}: {args.validation_prompt}") - for i, image in enumerate(images) - ] - } - ) - - del pipeline - torch.cuda.empty_cache() - - # Save the lora layers - accelerator.wait_for_everyone() - if accelerator.is_main_process: - unet = accelerator.unwrap_model(unet) - unet = unet.to(torch.float32) - unet_lora_layers = unet_attn_processors_state_dict(unet) - - if args.train_text_encoder: - text_encoder_one = accelerator.unwrap_model(text_encoder_one) - text_encoder_lora_layers = text_encoder_lora_state_dict(text_encoder_one.to(torch.float32)) - text_encoder_two = accelerator.unwrap_model(text_encoder_two) - text_encoder_2_lora_layers = text_encoder_lora_state_dict(text_encoder_two.to(torch.float32)) - else: - text_encoder_lora_layers = None - text_encoder_2_lora_layers = None - - StableDiffusionXLPipeline.save_lora_weights( - save_directory=args.output_dir, - unet_lora_layers=unet_lora_layers, - text_encoder_lora_layers=text_encoder_lora_layers, - text_encoder_2_lora_layers=text_encoder_2_lora_layers, - ) - - # Final inference - # Load previous pipeline - vae = AutoencoderKL.from_pretrained( - vae_path, - subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, - revision=args.revision, - torch_dtype=weight_dtype, - ) - pipeline = StableDiffusionXLPipeline.from_pretrained( - args.pretrained_model_name_or_path, vae=vae, revision=args.revision, torch_dtype=weight_dtype - ) - - # We train on the simplified learning objective. 
If we were previously predicting a variance, we need the scheduler to ignore it - scheduler_args = {} - - if "variance_type" in pipeline.scheduler.config: - variance_type = pipeline.scheduler.config.variance_type - - if variance_type in ["learned", "learned_range"]: - variance_type = "fixed_small" - - scheduler_args["variance_type"] = variance_type - - pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args) - - # load attention processors - pipeline.load_lora_weights(args.output_dir) - - # run inference - images = [] - if args.validation_prompt and args.num_validation_images > 0: - pipeline = pipeline.to(accelerator.device) - generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None - images = [ - pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] - for _ in range(args.num_validation_images) - ] - - for tracker in accelerator.trackers: - if tracker.name == "tensorboard": - np_images = np.stack([np.asarray(img) for img in images]) - tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") - if tracker.name == "wandb": - tracker.log( - { - "test": [ - wandb.Image(image, caption=f"{i}: {args.validation_prompt}") - for i, image in enumerate(images) - ] - } - ) - - if args.push_to_hub: - save_model_card( - repo_id, - images=images, - dataset_id=args.dataset_id, - base_model=args.pretrained_model_name_or_path, - train_text_encoder=args.train_text_encoder, - prompt=args.instance_prompt, - repo_folder=args.output_dir, - vae_path=args.pretrained_vae_model_name_or_path, - ) - upload_folder( - repo_id=repo_id, - folder_path=args.output_dir, - commit_message="End of training", - ignore_patterns=["step_*", "epoch_*"], - token=args.hub_token - ) - - accelerator.end_training() - -if __name__ == "__main__": - args = parse_args() - main(args) \ No newline at end of file diff --git a/spaces/florim/MedGPT/autogpt/json_utils/json_fix_general.py b/spaces/florim/MedGPT/autogpt/json_utils/json_fix_general.py deleted file mode 100644 index 7010fa3b9c1909de0e5a7f6ec13ca8aa418fe6c7..0000000000000000000000000000000000000000 --- a/spaces/florim/MedGPT/autogpt/json_utils/json_fix_general.py +++ /dev/null @@ -1,124 +0,0 @@ -"""This module contains functions to fix JSON strings using general programmatic approaches, suitable for addressing -common JSON formatting issues.""" -from __future__ import annotations - -import contextlib -import json -import re -from typing import Optional - -from autogpt.config import Config -from autogpt.json_utils.utilities import extract_char_position - -CFG = Config() - - -def fix_invalid_escape(json_to_load: str, error_message: str) -> str: - """Fix invalid escape sequences in JSON strings. - - Args: - json_to_load (str): The JSON string. - error_message (str): The error message from the JSONDecodeError - exception. - - Returns: - str: The JSON string with invalid escape sequences fixed. - """ - while error_message.startswith("Invalid \\escape"): - bad_escape_location = extract_char_position(error_message) - json_to_load = ( - json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1 :] - ) - try: - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - fix invalid escape", e) - error_message = str(e) - return json_to_load - - -def balance_braces(json_string: str) -> Optional[str]: - """ - Balance the braces in a JSON string. 
- - Args: - json_string (str): The JSON string. - - Returns: - str: The JSON string with braces balanced. - """ - - open_braces_count = json_string.count("{") - close_braces_count = json_string.count("}") - - while open_braces_count > close_braces_count: - json_string += "}" - close_braces_count += 1 - - while close_braces_count > open_braces_count: - json_string = json_string.rstrip("}") - close_braces_count -= 1 - - with contextlib.suppress(json.JSONDecodeError): - json.loads(json_string) - return json_string - - -def add_quotes_to_property_names(json_string: str) -> str: - """ - Add quotes to property names in a JSON string. - - Args: - json_string (str): The JSON string. - - Returns: - str: The JSON string with quotes added to property names. - """ - - def replace_func(match: re.Match) -> str: - return f'"{match[1]}":' - - property_name_pattern = re.compile(r"(\w+):") - corrected_json_string = property_name_pattern.sub(replace_func, json_string) - - try: - json.loads(corrected_json_string) - return corrected_json_string - except json.JSONDecodeError as e: - raise e - - -def correct_json(json_to_load: str) -> str: - """ - Correct common JSON errors. - Args: - json_to_load (str): The JSON string. - """ - - try: - if CFG.debug_mode: - print("json", json_to_load) - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error", e) - error_message = str(e) - if error_message.startswith("Invalid \\escape"): - json_to_load = fix_invalid_escape(json_to_load, error_message) - if error_message.startswith( - "Expecting property name enclosed in double quotes" - ): - json_to_load = add_quotes_to_property_names(json_to_load) - try: - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - add quotes", e) - error_message = str(e) - if balanced_str := balance_braces(json_to_load): - return balanced_str - return json_to_load diff --git a/spaces/fun-research/FC-CLIP/datasets/prepare_pascal_ctx_sem_seg.py b/spaces/fun-research/FC-CLIP/datasets/prepare_pascal_ctx_sem_seg.py deleted file mode 100644 index ea817bc8738c5974adfb1d0ef447548f8ee55e36..0000000000000000000000000000000000000000 --- a/spaces/fun-research/FC-CLIP/datasets/prepare_pascal_ctx_sem_seg.py +++ /dev/null @@ -1,84 +0,0 @@ -# ------------------------------------------------------------------------------ -# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# This work is made available under the Nvidia Source Code License. 
-# To view a copy of this license, visit -# https://github.com/NVlabs/ODISE/blob/main/LICENSE -# -# Written by Jiarui Xu -# ------------------------------------------------------------------------------ - -import os -from pathlib import Path -import shutil - -import numpy as np -import tqdm -from PIL import Image -import multiprocessing as mp -import functools -from detail import Detail - -# fmt: off -_mapping = np.sort( - np.array([ - 0, 2, 259, 260, 415, 324, 9, 258, 144, 18, 19, 22, 23, 397, 25, 284, - 158, 159, 416, 33, 162, 420, 454, 295, 296, 427, 44, 45, 46, 308, 59, - 440, 445, 31, 232, 65, 354, 424, 68, 326, 72, 458, 34, 207, 80, 355, - 85, 347, 220, 349, 360, 98, 187, 104, 105, 366, 189, 368, 113, 115 - ])) -# fmt: on -_key = np.array(range(len(_mapping))).astype("uint8") - - -def generate_labels(img_info, detail_api, out_dir): - def _class_to_index(mask, _mapping, _key): - # assert the values - values = np.unique(mask) - for i in range(len(values)): - assert values[i] in _mapping - index = np.digitize(mask.ravel(), _mapping, right=True) - return _key[index].reshape(mask.shape) - - sem_seg = _class_to_index(detail_api.getMask(img_info), _mapping=_mapping, _key=_key) - sem_seg = sem_seg - 1 # 0 (ignore) becomes 255. others are shifted by 1 - filename = img_info["file_name"] - - Image.fromarray(sem_seg).save(out_dir / filename.replace("jpg", "png")) - - -def copy_images(img_info, img_dir, out_dir): - filename = img_info["file_name"] - shutil.copy2(img_dir / filename, out_dir / filename) - - -if __name__ == "__main__": - dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "pascal_ctx_d2" - voc_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "VOCdevkit/VOC2010" - for split in ["training", "validation"]: - img_dir = voc_dir / "JPEGImages" - if split == "training": - detail_api = Detail(voc_dir / "trainval_merged.json", img_dir, "train") - else: - detail_api = Detail(voc_dir / "trainval_merged.json", img_dir, "val") - img_infos = detail_api.getImgs() - - output_img_dir = dataset_dir / "images" / split - output_ann_dir = dataset_dir / "annotations_ctx59" / split - - output_img_dir.mkdir(parents=True, exist_ok=True) - output_ann_dir.mkdir(parents=True, exist_ok=True) - - pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4)) - - pool.map( - functools.partial(copy_images, img_dir=img_dir, out_dir=output_img_dir), - tqdm.tqdm(img_infos, desc=f"Writing {split} images to {output_img_dir} ..."), - chunksize=100, - ) - - pool.map( - functools.partial(generate_labels, detail_api=detail_api, out_dir=output_ann_dir), - tqdm.tqdm(img_infos, desc=f"Writing {split} images to {output_ann_dir} ..."), - chunksize=100, - ) \ No newline at end of file diff --git a/spaces/fuqiang/txt2pic/app.py b/spaces/fuqiang/txt2pic/app.py deleted file mode 100644 index 547ebb0ece45013907ab6c678b7ef283dc5d3bda..0000000000000000000000000000000000000000 --- a/spaces/fuqiang/txt2pic/app.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import gradio as gr -import openai -from loguru import logger - -openai.api_type = "azure" -openai.api_base = os.getenv("OPENAI_API_BASE") -openai.api_version = "2023-06-01-preview" -openai.api_key = os.getenv("OPENAI_API_KEY") - -def txt2images(input,radio): - logger.info("输入参数prompt:"+input+",szie:"+radio) - if input==None or input=='': - return None - response = openai.Image.create( - prompt=input, - size=radio, - n=1 - ) - image_url = response["data"][0]["url"] - logger.info("生成图片url:"+image_url) - return image_url - -with gr.Blocks() as demo: - demo.title='文生图' - 
    with gr.Column(): - radio = gr.Radio(["1024x1024", "512x512", "256x256"], label="Select size", value="1024x1024") - input = gr.Textbox(placeholder="Enter a prompt", show_label=False) - submit = gr.Button("Generate image") - img = gr.Image(type="filepath", height=400, show_label=False) - submit.click(txt2images, [input, radio], [img]) -demo.queue(api_open=False).launch() \ No newline at end of file diff --git a/spaces/gagan3012/project-code-py/README.md b/spaces/gagan3012/project-code-py/README.md deleted file mode 100644 index 6a04701cd04b80ceb55f76fe1a7becfc972be32a..0000000000000000000000000000000000000000 --- a/spaces/gagan3012/project-code-py/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Project Code Py -emoji: 🐠 -colorFrom: indigo -colorTo: pink -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/gagan3012/summarization/t5s/__init__.py b/spaces/gagan3012/summarization/t5s/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/gligen/demo/gligen/ldm/models/diffusion/ddim.py b/spaces/gligen/demo/gligen/ldm/models/diffusion/ddim.py deleted file mode 100644 index ef5603ae921ee3ad88a1b5914201c1385bee3a2a..0000000000000000000000000000000000000000 --- a/spaces/gligen/demo/gligen/ldm/models/diffusion/ddim.py +++ /dev/null @@ -1,134 +0,0 @@ -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like - - -class DDIMSampler(object): - def __init__(self, diffusion, model, schedule="linear", alpha_generator_func=None, set_alpha_scale=None): - super().__init__() - self.diffusion = diffusion - self.model = model - self.device = diffusion.betas.device - self.ddpm_num_timesteps = diffusion.num_timesteps - self.schedule = schedule - self.alpha_generator_func = alpha_generator_func - self.set_alpha_scale = set_alpha_scale - - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - attr = attr.to(self.device) - setattr(self, name, attr) - - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.): - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=False) - alphas_cumprod = self.diffusion.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device) - - self.register_buffer('betas', to_torch(self.diffusion.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.diffusion.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - 
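        # (Sketch of the standard DDPM/DDIM identities these buffers implement:
        #   q(x_t | x_0) = N(sqrt(alphas_cumprod[t]) * x_0, (1 - alphas_cumprod[t]) * I),
        # and p_sample_ddim below applies the DDIM update
        #   x_prev = sqrt(a_prev) * pred_x0 + sqrt(1 - a_prev - sigma_t**2) * e_t + sigma_t * z.)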
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=False) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - - @torch.no_grad() - def sample(self, S, shape, input, uc=None, guidance_scale=1, mask=None, x0=None): - self.make_schedule(ddim_num_steps=S) - return self.ddim_sampling(shape, input, uc, guidance_scale, mask=mask, x0=x0) - - - @torch.no_grad() - def ddim_sampling(self, shape, input, uc, guidance_scale=1, mask=None, x0=None): - b = shape[0] - - img = input["x"] - if img == None: - img = torch.randn(shape, device=self.device) - input["x"] = img - - - time_range = np.flip(self.ddim_timesteps) - total_steps = self.ddim_timesteps.shape[0] - - #iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) - iterator = time_range - - if self.alpha_generator_func != None: - alphas = self.alpha_generator_func(len(iterator)) - - - for i, step in enumerate(iterator): - - # set alpha - if self.alpha_generator_func != None: - self.set_alpha_scale(self.model, alphas[i]) - - # run - index = total_steps - i - 1 - input["timesteps"] = torch.full((b,), step, device=self.device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.diffusion.q_sample( x0, input["timesteps"] ) - img = img_orig * mask + (1. 
- mask) * img - input["x"] = img - - img, pred_x0 = self.p_sample_ddim(input, index=index, uc=uc, guidance_scale=guidance_scale) - input["x"] = img - - return img - - - @torch.no_grad() - def p_sample_ddim(self, input, index, uc=None, guidance_scale=1): - - - e_t = self.model(input) - if uc is not None and guidance_scale != 1: - unconditional_input = dict(x=input["x"], timesteps=input["timesteps"], context=uc) - if "inpainting_extra_input" in input: - unconditional_input["inpainting_extra_input"] = input["inpainting_extra_input"] - e_t_uncond = self.model( unconditional_input ) - e_t = e_t_uncond + guidance_scale * (e_t - e_t_uncond) - - # select parameters corresponding to the currently considered timestep - b = input["x"].shape[0] - a_t = torch.full((b, 1, 1, 1), self.ddim_alphas[index], device=self.device) - a_prev = torch.full((b, 1, 1, 1), self.ddim_alphas_prev[index], device=self.device) - sigma_t = torch.full((b, 1, 1, 1), self.ddim_sigmas[index], device=self.device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), self.ddim_sqrt_one_minus_alphas[index],device=self.device) - - # current prediction for x_0 - pred_x0 = (input["x"] - sqrt_one_minus_at * e_t) / a_t.sqrt() - - # direction pointing to x_t - dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * torch.randn_like( input["x"] ) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - - return x_prev, pred_x0 diff --git a/spaces/gordonchan/h2oo/app.py b/spaces/gordonchan/h2oo/app.py deleted file mode 100644 index 0582a5702f6222dbae081e4985ef8daeb6111cab..0000000000000000000000000000000000000000 --- a/spaces/gordonchan/h2oo/app.py +++ /dev/null @@ -1 +0,0 @@ -gen.py \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Edius 5 Effects Free Download LINK.md b/spaces/gotiQspiryo/whisper-ui/examples/Edius 5 Effects Free Download LINK.md deleted file mode 100644 index 73b4a54addc7fbada03cdb627513a0a9e2b40f0c..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Edius 5 Effects Free Download LINK.md +++ /dev/null @@ -1,6 +0,0 @@ -

    edius 5 effects free download


    Download ✪✪✪ https://urlgoal.com/2uyN5t



    - -Edius 9,8,7,6,5 Effects Free Download. Edius Effects. A simple introduction to Edius effects. The effect is based on all that data which you ...
    -
    -
    -

    diff --git a/spaces/gotiQspiryo/whisper-ui/examples/HD Online Player (mynameiskhanhindimoviefreedownload) __HOT__.md b/spaces/gotiQspiryo/whisper-ui/examples/HD Online Player (mynameiskhanhindimoviefreedownload) __HOT__.md deleted file mode 100644 index f728924b4c7d6585465987d74725265f274cb482..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/HD Online Player (mynameiskhanhindimoviefreedownload) __HOT__.md +++ /dev/null @@ -1,10 +0,0 @@ -

    HD Online Player (mynameiskhanhindimoviefreedownload)


    Download Filehttps://urlgoal.com/2uyLrg



    -7 MB PC version. Hence all aspects of film/video/audio quality can be set up and viewed. The player is more suitable for playing SD-DVDs. Whether you like it or not, you can give your comment. All credits for this plugin go to its original developer... Feel free to contact me for support if you need any. Thanks for downloading. I'll update frequently if possible. If any of my credits don't work for you, feel free to contact me. Enjoy! Additional info: Adding a title to the playlist? Drag and drop files to a playlist. No need to drag and drop every file to the playlist every time. Save the playlist and use the file path like so: Playlist: C:\path\to\playlist. Use the player with no issues when dragging and dropping any of your custom files. Playlist: C:\Users\TIDAL\AppData\Local - -XMind\Playlist
    -
    -
    -

    diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Johnson Graduate School Of Management Library.md b/spaces/gotiQspiryo/whisper-ui/examples/Johnson Graduate School Of Management Library.md deleted file mode 100644 index 0549a514ee55d5c49eec566b1ce18a927dc6724b..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Johnson Graduate School Of Management Library.md +++ /dev/null @@ -1,22 +0,0 @@ - -

    Together with new Associate Director Angela Horne, Lyons will work closely with all three schools' staff and communities to develop relationships and implement a shared vision for a new library model. That model will focus on enhancing services, expanding access to online resources, increasing efficiency and understanding evolving user needs across all three libraries.

    -

    johnson graduate school of management library


    Download File –––––>>> https://urlgoal.com/2uyN6W



    -

    "I'm excited about the opportunity to continue Cornell's tradition of excellence in library service in this new consolidated structure," Lyons said. "The idea is to create a new model that takes advantage of the unique intersections and strengths of the three schools and the three libraries. The task is challenging and even a bit intimidating, but the strong support from the deans and the library, in addition to the considerable talents and commitment of the library staff, will assure we remain at a level befitting an institution like Cornell."

    -

    12. The Stanford GSB Library at the Stanford Graduate School of Business, founded in 1933, offers a wide variety of resources to help you conduct business research, including personal consultations, database guides, and research tools. The business library exists primarily to serve the research, teaching, and learning needs of current Stanford GSB faculty, staff, and students. Its collection, digital and physical, covers a broad range of management topics and includes material on finance, accounting, economics, political economy, marketing, organizational behavior, and international business.

    -

    The Dyson School empowers students to become leaders who are ready to tackle global challenges, make a lasting impact, and work to solve some of the world's toughest societal challenges. It is recognized as one of the world's top undergraduate business schools.

    -

    -

    Weill Cornell Medicine is committed to educating the next generation of healthcare leaders through our medical and graduate schools, providing the best possible medical care to our patients, and speeding breakthrough discoveries from the lab bench to patient bedsides.

    -

    The Nestle Library seeks creative, energetic, forward-thinking candidates for the professional position of Public Services Librarian in Cornell University's School of Hotel Administration (SHA). As part of the Johnson College of Business (JCB), the School of Hotel Administration is a business management program with a focus on the hospitality industry, including operations, marketing, finance, management, and real estate. SHA integrates a diverse group of undergraduate, professional, and graduate students, faculty, staff, and practitioners to provide a unique educational experience. As the only Ivy League business management program to focus exclusively on the hospitality industry, SHA actively prepares students to be leaders in a dynamic global industry.

    -

    The Nestle Library works in tandem with librarians from the three schools that make up the Johnson College of Business - the School of Hotel Administration, the SC Johnson Graduate School of Management, and the Dyson School of Applied Economics and Management. JCB offers a variety of graduate and undergraduate degree offerings and houses a variety of research centers and institutes. JCB strives to mobilize diverse expertise, generate world-class knowledge, inspire students, and impact society in a positive manner.

    -

    Our specific services fall into the categories of career exploration, graduate and professional school advising and preparation, health careers, legal careers, internship and full-time job searches, fellowships, and specialized advising needs. We provide many resources in our libraries (online and in Barnes Hall), throughout this site, and in the Career Development Toolkit in Canvas.

    -

    Henry DeLand, founder and president of DeLand Academy from 1883 to 1885, personally selected John F. Forbes to be the first president of DeLand College, which became John B. Stetson University in 1889. Forbes graduated from Rochester University and was a former professor at the State Normal School in Brockport, New York; he was 32 when he assumed the office of president. He received a starting salary of $2,000 a year plus room and board for himself and his family. Enrollment grew from 88 students to almost 300 during the Forbes tenure and a number of buildings were constructed, including Stetson Hall, Chaudoin Hall, Elizabeth Hall, Flagler Hall, and the residence of the President. Forbes oversaw the start of the first law school in Florida at Stetson in 1900. Forbes resigned in 1903 to enter private business. Although he left Stetson after his resignation, he was kept on the payroll until the Trustees officially accepted his resignation in 1904. Date of birth: June 13, 1853; date of death: March 30, 1926.

    -

    William Sims Allen graduated from Baylor University and earned advanced degrees from Columbia University. Allen returned to Baylor where he served as vice president and chairman of the school of education prior to coming to Stetson at age 46. Although Allen coped with the challenges of the Depression years and World War II, Stetson grew from a few hundred students to 2,000 during his time in office. In order to deal with the rapid growth, Allen initiated a program of expansion that included establishing separate schools for the disciplines of music and business. The physical campus also grew during this period and higher academic standards were put in place. The Allen inaugural address was carried over the first Florida statewide radio transmission. He resigned due to illness in September 1947. Date of birth: October 27, 1888; date of death: June 1, 1951.

    -

    Formal movements towards a business school began in 1914 when faculty in the NYS College of Agriculture (which today offers an undergraduate business major) convened the first meeting of the "Committee on a Commercial College." Led by economics professor Allyn Young, the committee recommended the creation of a "two-year graduate course leading to the Master's degree" in both business and public administration. Young had been trained at Harvard University, and the influence on the committee's discussion of its business school's creation only six years prior was apparent, as the committee's recommendations included instruction for graduate students only, selectivity in admissions, and integration into the larger university community.[5]

    -

    During this period faculty divisions began to emerge, with three distinct groups vying for resources: business management, public administration, and healthcare administration (the Sloan Program). In 1983, the faculty voted to end instruction in the latter two fields and to change the school's name to the Graduate School of Management. The public administration program moved to the NYS College of Human Ecology. That same year, the school began offering a dual-degree MBA/MA in Asian Studies with Cornell's FALCON (Full-year Asian Language Concentration) program, to produce American MBAs with some knowledge of the Japanese language and culture gained through coursework in Ithaca and a required summer internship in Japan. The school also created an MBA/MEng, originally called the Program in Manufacturing Management (PIMM). At the same time, Curtis W. Tarr was appointed the dean of the school.[5]

    -

    Johnson is housed entirely in Sage Hall, a 19th-century High Victorian Gothic building which was originally built as a women's dormitory.[9][19] It is located near the center of Cornell's main campus, across the street from the Cornell School of Hotel Administration and the four-diamond Statler Hotel. Inside Sage are a management library, a café, an atrium, classrooms, an executive lounge, a trading floor, student and faculty lounges, and a parlor. There are 38 breakout rooms and two phone booths. The building also has showers, shoe shining, and out-service dry cleaning. Offices are provided for all faculty and doctoral students, and MBA students are all assigned a locker.

    -

    The school's graduates have served in executive leadership positions for numerous corporations. Alumni include Kraft Foods CEO Irene Rosenfeld (Ph.D. '80),[52] Aetna CEO Mark Bertolini (MBA '84),[53] Silicon Valley venture capitalist Mary Meeker (MBA '86),[54] Ocean Spray CEO Randy Papadellis (MBA),[55] co-founder of PeopleSoft David Duffield (MBA '62),[56] Strategy& Middle East Chairman Joe Saddi (MBA '83),[57] former Chevron CEO Ken Durr (MBA '60),[58] former Cargill CEO Warren Staley (MBA '67),[59] former CEO of Emerson Charles F. Knight (MBA '59),[60] former Applied Materials CEO James C. Morgan (MBA '63),[61] Rock and Roll Hall of Fame President Terry C. Stewart (MBA '72),[62] Sprint Nextel CEO Dan Hesse (MBA '77),[63] BP CFO Byron Grote (Ph.D. '81),[64] Comcast CIO Andrew Baer (MBA '82),[65] S.C. Johnson & Son CEO Fisk Johnson (MBA '84),[66] Henry Ford Hospital CEO Nancy Schlichting (MBA '79),[67] and Priceline.com CEO Brett Keller (MBA '97).[68]

    -

    To make the most of the admission process, students should solidify their reasons for specifically choosing Cornell, as it is a collaborative and close-knit environment. Doing your due diligence on the graduate real estate minor as well as understanding the opportunities presented to you through collaboration with the Baker program and various other graduate schools on campus shows a thorough understanding of the collaborative environment of Cornell.

    -

    In the National Real Estate Challenge, teams of graduate students from the top-ranked business schools are invited to Austin to participate in a case-based real estate competition with cash awards for the top four teams.

    -

    Scholarships are available to assist individuals studying to become law librarians as either a library or law school student, or to library school graduates seeking an advanced degree in a related field. Candidates should apply for more than one scholarship when appropriate. Preference is given to AALL members, but scholarships are not restricted to members. Applicants with law library experience are also given preference, but it is not required. Evidence of financial need must be submitted.

    -
    -
    \ No newline at end of file diff --git a/spaces/gradio/blocks_essay_update/README.md b/spaces/gradio/blocks_essay_update/README.md deleted file mode 100644 index ff745448ca55016d947d1d8b3bae330acf77a78b..0000000000000000000000000000000000000000 --- a/spaces/gradio/blocks_essay_update/README.md +++ /dev/null @@ -1,12 +0,0 @@ - ---- -title: blocks_essay_update -emoji: 🔥 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.44.4 -app_file: run.py -pinned: false -hf_oauth: true ---- diff --git a/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Sidebar/components/OpenCloseButton.tsx b/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Sidebar/components/OpenCloseButton.tsx deleted file mode 100644 index 80ad30cfee42f5758c1d81b57df4fbda7a56e586..0000000000000000000000000000000000000000 --- a/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Sidebar/components/OpenCloseButton.tsx +++ /dev/null @@ -1,42 +0,0 @@ -import { IconArrowBarLeft, IconArrowBarRight } from '@tabler/icons-react'; - -interface Props { - onClick: any; - side: 'left' | 'right'; -} - -export const CloseSidebarButton = ({ onClick, side }: Props) => { - return ( - <> - -
    - - ); -}; - -export const OpenSidebarButton = ({ onClick, side }: Props) => { - return ( - - ); -}; diff --git a/spaces/gundruke/ua-thesis-absa/README.md b/spaces/gundruke/ua-thesis-absa/README.md deleted file mode 100644 index 483a681c628c117f1702656d3a039a6bd4cb522e..0000000000000000000000000000000000000000 --- a/spaces/gundruke/ua-thesis-absa/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Ua Thesis Absa -emoji: 😻 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hamacojr/CAT-Seg/demo/visualizer.py b/spaces/hamacojr/CAT-Seg/demo/visualizer.py deleted file mode 100644 index 9903b7e49715d5df0ea861aec164e047abb09a80..0000000000000000000000000000000000000000 --- a/spaces/hamacojr/CAT-Seg/demo/visualizer.py +++ /dev/null @@ -1,219 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Copied from: https://github.com/facebookresearch/detectron2/blob/master/demo/predictor.py -import atexit -import bisect -import multiprocessing as mp -from collections import deque - -import cv2 -import torch - -from detectron2.data import MetadataCatalog -from detectron2.engine.defaults import DefaultPredictor -from detectron2.utils.video_visualizer import VideoVisualizer -from detectron2.utils.visualizer import ColorMode, Visualizer - - -class VisualizationGt(object): - def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False): - """ - Args: - cfg (CfgNode): - instance_mode (ColorMode): - parallel (bool): whether to run the model in different processes from visualization. - Useful since the visualization logic can be slow. - """ - self.metadata = MetadataCatalog.get( - cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused" - ) - self.cpu_device = torch.device("cpu") - self.instance_mode = instance_mode - - self.parallel = parallel - if parallel: - num_gpu = torch.cuda.device_count() - self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu) - else: - self.predictor = DefaultPredictor(cfg) - - def run_on_image(self, image, predictions): - """ - Args: - image (np.ndarray): an image of shape (H, W, C) (in BGR order). - This is the format used by OpenCV. - Returns: - predictions (dict): the output of the model. - vis_output (VisImage): the visualized image output. - """ - vis_output = None - # predictions = self.predictor(image) - # Convert image from OpenCV BGR format to Matplotlib RGB format. - image = image[:, :, ::-1] - visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode) - if "panoptic_seg" in predictions: - panoptic_seg, segments_info = predictions["panoptic_seg"] - vis_output = visualizer.draw_panoptic_seg_predictions( - panoptic_seg.to(self.cpu_device), segments_info - ) - else: - if "sem_seg" in predictions: - vis_output = visualizer.draw_sem_seg( - predictions["sem_seg"] - ) - if "instances" in predictions: - instances = predictions["instances"].to(self.cpu_device) - vis_output = visualizer.draw_instance_predictions(predictions=instances) - - return predictions, vis_output - - def _frame_from_video(self, video): - while video.isOpened(): - success, frame = video.read() - if success: - yield frame - else: - break - - def run_on_video(self, video): - """ - Visualizes predictions on frames of the input video. - Args: - video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be - either a webcam or a video file. 
- Yields: - ndarray: BGR visualizations of each video frame. - """ - video_visualizer = VideoVisualizer(self.metadata, self.instance_mode) - - def process_predictions(frame, predictions): - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - if "panoptic_seg" in predictions: - panoptic_seg, segments_info = predictions["panoptic_seg"] - vis_frame = video_visualizer.draw_panoptic_seg_predictions( - frame, panoptic_seg.to(self.cpu_device), segments_info - ) - elif "instances" in predictions: - predictions = predictions["instances"].to(self.cpu_device) - vis_frame = video_visualizer.draw_instance_predictions(frame, predictions) - elif "sem_seg" in predictions: - vis_frame = video_visualizer.draw_sem_seg( - frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) - ) - - # Converts Matplotlib RGB format to OpenCV BGR format - vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR) - return vis_frame - - frame_gen = self._frame_from_video(video) - if self.parallel: - buffer_size = self.predictor.default_buffer_size - - frame_data = deque() - - for cnt, frame in enumerate(frame_gen): - frame_data.append(frame) - self.predictor.put(frame) - - if cnt >= buffer_size: - frame = frame_data.popleft() - predictions = self.predictor.get() - yield process_predictions(frame, predictions) - - while len(frame_data): - frame = frame_data.popleft() - predictions = self.predictor.get() - yield process_predictions(frame, predictions) - else: - for frame in frame_gen: - yield process_predictions(frame, self.predictor(frame)) - - -class AsyncPredictor: - """ - A predictor that runs the model asynchronously, possibly on >1 GPUs. - Because rendering the visualization takes considerably amount of time, - this helps improve throughput a little bit when rendering videos. 
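    Usage sketch (cfg is assumed to be a detectron2 CfgNode): predictor = AsyncPredictor(cfg, num_gpus=2);
    predictor.put(frame); out = predictor.get(). get() returns results in the order frames were submitted,
    so outputs can be matched back to their input frames.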
- """ - - class _StopToken: - pass - - class _PredictWorker(mp.Process): - def __init__(self, cfg, task_queue, result_queue): - self.cfg = cfg - self.task_queue = task_queue - self.result_queue = result_queue - super().__init__() - - def run(self): - predictor = DefaultPredictor(self.cfg) - - while True: - task = self.task_queue.get() - if isinstance(task, AsyncPredictor._StopToken): - break - idx, data = task - result = predictor(data) - self.result_queue.put((idx, result)) - - def __init__(self, cfg, num_gpus: int = 1): - """ - Args: - cfg (CfgNode): - num_gpus (int): if 0, will run on CPU - """ - num_workers = max(num_gpus, 1) - self.task_queue = mp.Queue(maxsize=num_workers * 3) - self.result_queue = mp.Queue(maxsize=num_workers * 3) - self.procs = [] - for gpuid in range(max(num_gpus, 1)): - cfg = cfg.clone() - cfg.defrost() - cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu" - self.procs.append( - AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue) - ) - - self.put_idx = 0 - self.get_idx = 0 - self.result_rank = [] - self.result_data = [] - - for p in self.procs: - p.start() - atexit.register(self.shutdown) - - def put(self, image): - self.put_idx += 1 - self.task_queue.put((self.put_idx, image)) - - def get(self): - self.get_idx += 1 # the index needed for this request - if len(self.result_rank) and self.result_rank[0] == self.get_idx: - res = self.result_data[0] - del self.result_data[0], self.result_rank[0] - return res - - while True: - # make sure the results are returned in the correct order - idx, res = self.result_queue.get() - if idx == self.get_idx: - return res - insert = bisect.bisect(self.result_rank, idx) - self.result_rank.insert(insert, idx) - self.result_data.insert(insert, res) - - def __len__(self): - return self.put_idx - self.get_idx - - def __call__(self, image): - self.put(image) - return self.get() - - def shutdown(self): - for _ in self.procs: - self.task_queue.put(AsyncPredictor._StopToken()) - - @property - def default_buffer_size(self): - return len(self.procs) * 5 diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/inference.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/inference.py deleted file mode 100644 index ce3c00ecbfd0535d54701299fc8337680a68438f..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/inference.py +++ /dev/null @@ -1,121 +0,0 @@ -import cv2 -import numpy as np -import torch -from torch import nn - -from maskrcnn_benchmark.structures.bounding_box import BoxList -from maskrcnn_benchmark.structures.keypoint import PersonKeypoints - - -class KeypointPostProcessor(nn.Module): - def __init__(self, keypointer=None): - super(KeypointPostProcessor, self).__init__() - self.keypointer = keypointer - - def forward(self, x, boxes): - mask_prob = x - - scores = None - if self.keypointer: - mask_prob, scores = self.keypointer(x, boxes) - - assert len(boxes) == 1, "Only non-batched inference supported for now" - boxes_per_image = [box.bbox.size(0) for box in boxes] - mask_prob = mask_prob.split(boxes_per_image, dim=0) - scores = scores.split(boxes_per_image, dim=0) - - results = [] - for prob, box, score in zip(mask_prob, boxes, scores): - bbox = BoxList(box.bbox, box.size, mode="xyxy") - for field in box.fields(): - bbox.add_field(field, box.get_field(field)) - prob = PersonKeypoints(prob, box.size) - prob.add_field("logits", score) - 
bbox.add_field("keypoints", prob) - results.append(bbox) - - return results - - -def heatmaps_to_keypoints(maps, rois): - """Extract predicted keypoint locations from heatmaps. Output has shape - (#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob) - for each keypoint. - """ - # This function converts a discrete image coordinate in a HEATMAP_SIZE x - # HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain - # consistency with keypoints_to_heatmap_labels by using the conversion from - # Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a - # continuous coordinate. - offset_x = rois[:, 0] - offset_y = rois[:, 1] - - widths = rois[:, 2] - rois[:, 0] - heights = rois[:, 3] - rois[:, 1] - widths = np.maximum(widths, 1) - heights = np.maximum(heights, 1) - widths_ceil = np.ceil(widths) - heights_ceil = np.ceil(heights) - - # NCHW to NHWC for use with OpenCV - maps = np.transpose(maps, [0, 2, 3, 1]) - min_size = 0 # cfg.KRCNN.INFERENCE_MIN_SIZE - num_keypoints = maps.shape[3] - xy_preds = np.zeros((len(rois), 3, num_keypoints), dtype=np.float32) - end_scores = np.zeros((len(rois), num_keypoints), dtype=np.float32) - for i in range(len(rois)): - if min_size > 0: - roi_map_width = int(np.maximum(widths_ceil[i], min_size)) - roi_map_height = int(np.maximum(heights_ceil[i], min_size)) - else: - roi_map_width = widths_ceil[i] - roi_map_height = heights_ceil[i] - width_correction = widths[i] / roi_map_width - height_correction = heights[i] / roi_map_height - roi_map = cv2.resize( - maps[i], (roi_map_width, roi_map_height), interpolation=cv2.INTER_CUBIC - ) - # Bring back to CHW - roi_map = np.transpose(roi_map, [2, 0, 1]) - # roi_map_probs = scores_to_probs(roi_map.copy()) - w = roi_map.shape[2] - pos = roi_map.reshape(num_keypoints, -1).argmax(axis=1) - x_int = pos % w - y_int = (pos - x_int) // w - # assert (roi_map_probs[k, y_int, x_int] == - # roi_map_probs[k, :, :].max()) - x = (x_int + 0.5) * width_correction - y = (y_int + 0.5) * height_correction - xy_preds[i, 0, :] = x + offset_x[i] - xy_preds[i, 1, :] = y + offset_y[i] - xy_preds[i, 2, :] = 1 - end_scores[i, :] = roi_map[np.arange(num_keypoints), y_int, x_int] - - return np.transpose(xy_preds, [0, 2, 1]), end_scores - - -class Keypointer(object): - """ - Projects a set of masks in an image on the locations - specified by the bounding boxes - """ - - def __init__(self, padding=0): - self.padding = padding - - def __call__(self, masks, boxes): - # TODO do this properly - if isinstance(boxes, BoxList): - boxes = [boxes] - assert len(boxes) == 1 - - result, scores = heatmaps_to_keypoints( - masks.detach().cpu().numpy(), boxes[0].bbox.cpu().numpy() - ) - return torch.from_numpy(result).to(masks.device), torch.as_tensor(scores, device=masks.device) - - -def make_roi_keypoint_post_processor(cfg): - keypointer = Keypointer() - keypoint_post_processor = KeypointPostProcessor(keypointer) - return keypoint_post_processor \ No newline at end of file diff --git a/spaces/hca97/Mosquito-Detection/my_models/torch_hub_cache/yolov5/utils/__init__.py b/spaces/hca97/Mosquito-Detection/my_models/torch_hub_cache/yolov5/utils/__init__.py deleted file mode 100644 index d33b92449e9d1b472e36c7354e0e505dcab3f07e..0000000000000000000000000000000000000000 --- a/spaces/hca97/Mosquito-Detection/my_models/torch_hub_cache/yolov5/utils/__init__.py +++ /dev/null @@ -1,86 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -utils/initialization -""" - -import contextlib -import platform -import threading - - -def 
emojis(str=''): - # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - - -class TryExcept(contextlib.ContextDecorator): - # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager - def __init__(self, msg=''): - self.msg = msg - - def __enter__(self): - pass - - def __exit__(self, exc_type, value, traceback): - if value: - print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) - return True - - -def threaded(func): - # Multi-threads a target function and returns thread. Usage: @threaded decorator - def wrapper(*args, **kwargs): - thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) - thread.start() - return thread - - return wrapper - - -def join_threads(verbose=False): - # Join all daemon threads, i.e. atexit.register(lambda: join_threads()) - main_thread = threading.current_thread() - for t in threading.enumerate(): - if t is not main_thread: - if verbose: - print(f'Joining thread {t.name}') - t.join() - - -def notebook_init(verbose=True): - # Check system software and hardware - print('Checking setup...') - - import os - import shutil - - from ultralytics.utils.checks import check_requirements - - from utils.general import check_font, is_colab - from utils.torch_utils import select_device # imports - - check_font() - - import psutil - - if check_requirements('wandb', install=False): - os.system('pip uninstall -y wandb') # eliminate unexpected account creation prompt with infinite hang - if is_colab(): - shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory - - # System info - display = None - if verbose: - gb = 1 << 30 # bytes to GiB (1024 ** 3) - ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage('/') - with contextlib.suppress(Exception): # clear display if ipython is installed - from IPython import display - display.clear_output() - s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' - else: - s = '' - - select_device(newline=False) - print(emojis(f'Setup complete ✅ {s}')) - return display diff --git a/spaces/heiyubili/bingo/src/lib/isomorphic/index.ts b/spaces/heiyubili/bingo/src/lib/isomorphic/index.ts deleted file mode 100644 index 738dc92f74079ab762d584fb7422a8c8c3b61547..0000000000000000000000000000000000000000 --- a/spaces/heiyubili/bingo/src/lib/isomorphic/index.ts +++ /dev/null @@ -1,17 +0,0 @@ -'use client' - -import Default from './browser' - -let exportsModel: any = {} - -if (process.browser) { - Object.assign(exportsModel, require('./browser').default) -} else { - Object.assign(exportsModel, require('./node').default) -} - -export default exportsModel! 
as typeof Default - -export const fetch: typeof Default.fetch = exportsModel!.fetch -export const WebSocket: typeof Default.WebSocket = exportsModel!.WebSocket -export const debug: typeof Default.debug = exportsModel!.debug diff --git a/spaces/hekbobo/bingo/src/components/ui/dialog.tsx b/spaces/hekbobo/bingo/src/components/ui/dialog.tsx deleted file mode 100644 index 925e77fe7858fb218b5115b4e225174a886e0f02..0000000000000000000000000000000000000000 --- a/spaces/hekbobo/bingo/src/components/ui/dialog.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' -import * as DialogPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Dialog = DialogPrimitive.Root - -const DialogTrigger = DialogPrimitive.Trigger - -const DialogPortal = ({ - className, - children, - ...props -}: DialogPrimitive.DialogPortalProps) => ( - -
    - {children} -
    -
    -) -DialogPortal.displayName = DialogPrimitive.Portal.displayName - -const DialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogOverlay.displayName = DialogPrimitive.Overlay.displayName - -const DialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - {children} - - - Close - - - -)) -DialogContent.displayName = DialogPrimitive.Content.displayName - -const DialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -DialogHeader.displayName = 'DialogHeader' - -const DialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -DialogFooter.displayName = 'DialogFooter' - -const DialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogTitle.displayName = DialogPrimitive.Title.displayName - -const DialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogDescription.displayName = DialogPrimitive.Description.displayName - -export { - Dialog, - DialogTrigger, - DialogContent, - DialogHeader, - DialogFooter, - DialogTitle, - DialogDescription -} diff --git a/spaces/hf4all/web-ui/README.md b/spaces/hf4all/web-ui/README.md deleted file mode 100644 index d9431cb5123191820605b2a570b0060c79f99883..0000000000000000000000000000000000000000 --- a/spaces/hf4all/web-ui/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: gradio-chatbot-ui -emoji: 📉 -colorFrom: blue -colorTo: blue -sdk: static -pinned: true -license: gpl-3.0 ---- - -# hf4all-web diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/evaluation/model_selection/figure_out_what_to_submit.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/evaluation/model_selection/figure_out_what_to_submit.py deleted file mode 100644 index 8fc5840d187d981d617e643892b6e49719508b13..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/evaluation/model_selection/figure_out_what_to_submit.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
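# (Usage sketch, assuming the standard nnU-Net v1 console-script mapping of this module
# to `nnUNet_find_best_configuration`; the task id is illustrative:)
#
#   nnUNet_find_best_configuration -m 2d 3d_fullres 3d_lowres 3d_cascade_fullres -t 4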
-import shutil -from itertools import combinations -import nnunet -from batchgenerators.utilities.file_and_folder_operations import * -from nnunet.evaluation.add_mean_dice_to_json import foreground_mean -from nnunet.evaluation.evaluator import evaluate_folder -from nnunet.evaluation.model_selection.ensemble import ensemble -from nnunet.paths import network_training_output_dir -import numpy as np -from subprocess import call -from nnunet.postprocessing.consolidate_postprocessing import consolidate_folds, collect_cv_niftis -from nnunet.utilities.folder_names import get_output_folder_name -from nnunet.paths import default_cascade_trainer, default_trainer, default_plans_identifier - - -def find_task_name(folder, task_id): - candidates = subdirs(folder, prefix="Task%03.0d_" % task_id, join=False) - assert len(candidates) > 0, "no candidate for Task id %d found in folder %s" % (task_id, folder) - assert len(candidates) == 1, "more than one candidate for Task id %d found in folder %s" % (task_id, folder) - return candidates[0] - - -def get_mean_foreground_dice(json_file): - results = load_json(json_file) - return get_foreground_mean(results) - - -def get_foreground_mean(results): - results_mean = results['results']['mean'] - dice_scores = [results_mean[i]['Dice'] for i in results_mean.keys() if i != "0" and i != 'mean'] - return np.mean(dice_scores) - - -def main(): - import argparse - parser = argparse.ArgumentParser(usage="This is intended to identify the best model based on the five fold " - "cross-validation. Running this script requires all models to have been run " - "already. This script will summarize the results of the five folds of all " - "models in one json each for easy interpretability") - - parser.add_argument("-m", '--models', nargs="+", required=False, default=['2d', '3d_lowres', '3d_fullres', - '3d_cascade_fullres']) - parser.add_argument("-t", '--task_ids', nargs="+", required=True) - - parser.add_argument("-tr", type=str, required=False, default=default_trainer, - help="nnUNetTrainer class. Default: %s" % default_trainer) - parser.add_argument("-ctr", type=str, required=False, default=default_cascade_trainer, - help="nnUNetTrainer class for cascade model. Default: %s" % default_cascade_trainer) - parser.add_argument("-pl", type=str, required=False, default=default_plans_identifier, - help="plans name, Default: %s" % default_plans_identifier) - parser.add_argument('-f', '--folds', nargs='+', default=(0, 1, 2, 3, 4), help="Use this if you have non-standard " - "folds. Experienced users only.") - parser.add_argument('--disable_ensembling', required=False, default=False, action='store_true', - help='Set this flag to disable the use of ensembling. This will find the best single ' - 'configuration for each task.') - parser.add_argument("--disable_postprocessing", required=False, default=False, action="store_true", - help="Set this flag if you want to disable the use of postprocessing") - - args = parser.parse_args() - tasks = [int(i) for i in args.task_ids] - - models = args.models - tr = args.tr - trc = args.ctr - pl = args.pl - disable_ensembling = args.disable_ensembling - disable_postprocessing = args.disable_postprocessing - folds = tuple(int(i) for i in args.folds) - - validation_folder = "validation_raw" - - # this script now acts independently from the summary jsons. 
That was unnecessary - id_task_mapping = {} - - for t in tasks: - # first collect pure model performance - results = {} - all_results = {} - valid_models = [] - for m in models: - if m == "3d_cascade_fullres": - trainer = trc - else: - trainer = tr - - if t not in id_task_mapping.keys(): - task_name = find_task_name(get_output_folder_name(m), t) - id_task_mapping[t] = task_name - - output_folder = get_output_folder_name(m, id_task_mapping[t], trainer, pl) - if not isdir(output_folder): - raise RuntimeError("Output folder for model %s is missing, expected: %s" % (m, output_folder)) - - if disable_postprocessing: - # we need to collect the predicted niftis from the 5-fold cv and evaluate them against the ground truth - cv_niftis_folder = join(output_folder, 'cv_niftis_raw') - - if not isfile(join(cv_niftis_folder, 'summary.json')): - print(t, m, ': collecting niftis from 5-fold cv') - if isdir(cv_niftis_folder): - shutil.rmtree(cv_niftis_folder) - - collect_cv_niftis(output_folder, cv_niftis_folder, validation_folder, folds) - - niftis_gt = subfiles(join(output_folder, "gt_niftis"), suffix='.nii.gz', join=False) - niftis_cv = subfiles(cv_niftis_folder, suffix='.nii.gz', join=False) - if not all([i in niftis_gt for i in niftis_cv]): - raise AssertionError("It does not seem like you trained all the folds! Train " \ - "all folds first! There are %d gt niftis in %s but only " \ - "%d predicted niftis in %s" % (len(niftis_gt), niftis_gt, - len(niftis_cv), niftis_cv)) - - # load a summary file so that we can know what class labels to expect - summary_fold0 = load_json(join(output_folder, "fold_%d" % folds[0], validation_folder, - "summary.json"))['results']['mean'] - # read classes from summary.json - classes = tuple((int(i) for i in summary_fold0.keys())) - - # evaluate the cv niftis - print(t, m, ': evaluating 5-fold cv results') - evaluate_folder(join(output_folder, "gt_niftis"), cv_niftis_folder, classes) - - else: - postprocessing_json = join(output_folder, "postprocessing.json") - cv_niftis_folder = join(output_folder, "cv_niftis_raw") - - # we need cv_niftis_postprocessed to know the single model performance. And we need the - # postprocessing_json. 
If either of those is missing, rerun consolidate_folds - if not isfile(postprocessing_json) or not isdir(cv_niftis_folder): - print("running missing postprocessing for %s and model %s" % (id_task_mapping[t], m)) - consolidate_folds(output_folder, folds=folds) - - assert isfile(postprocessing_json), "Postprocessing json missing, expected: %s" % postprocessing_json - assert isdir(cv_niftis_folder), "Folder with niftis from CV missing, expected: %s" % cv_niftis_folder - - # obtain mean foreground dice - summary_file = join(cv_niftis_folder, "summary.json") - results[m] = get_mean_foreground_dice(summary_file) - foreground_mean(summary_file) - all_results[m] = load_json(summary_file)['results']['mean'] - valid_models.append(m) - - if not disable_ensembling: - # now run ensembling and add ensembling to results - print("\nI will now ensemble combinations of the following models:\n", valid_models) - if len(valid_models) > 1: - for m1, m2 in combinations(valid_models, 2): - - trainer_m1 = trc if m1 == "3d_cascade_fullres" else tr - trainer_m2 = trc if m2 == "3d_cascade_fullres" else tr - - ensemble_name = "ensemble_" + m1 + "__" + trainer_m1 + "__" + pl + "--" + m2 + "__" + trainer_m2 + "__" + pl - output_folder_base = join(network_training_output_dir, "ensembles", id_task_mapping[t], ensemble_name) - maybe_mkdir_p(output_folder_base) - - network1_folder = get_output_folder_name(m1, id_task_mapping[t], trainer_m1, pl) - network2_folder = get_output_folder_name(m2, id_task_mapping[t], trainer_m2, pl) - - print("ensembling", network1_folder, network2_folder) - ensemble(network1_folder, network2_folder, output_folder_base, id_task_mapping[t], validation_folder, folds, allow_ensembling=not disable_postprocessing) - # ensembling will automatically do postprocessingget_foreground_mean - - # now get result of ensemble - results[ensemble_name] = get_mean_foreground_dice(join(output_folder_base, "ensembled_raw", "summary.json")) - summary_file = join(output_folder_base, "ensembled_raw", "summary.json") - foreground_mean(summary_file) - all_results[ensemble_name] = load_json(summary_file)['results']['mean'] - - # now print all mean foreground dice and highlight the best - foreground_dices = list(results.values()) - best = np.max(foreground_dices) - for k, v in results.items(): - print(k, v) - - predict_str = "" - best_model = None - for k, v in results.items(): - if v == best: - print("%s submit model %s" % (id_task_mapping[t], k), v) - best_model = k - print("\nHere is how you should predict test cases. 
Run in sequential order and replace all input and output folder names with your personalized ones\n") - if k.startswith("ensemble"): - tmp = k[len("ensemble_"):] - model1, model2 = tmp.split("--") - m1, t1, pl1 = model1.split("__") - m2, t2, pl2 = model2.split("__") - predict_str += "nnUNet_predict -i FOLDER_WITH_TEST_CASES -o OUTPUT_FOLDER_MODEL1 -tr " + tr + " -ctr " + trc + " -m " + m1 + " -p " + pl + " -t " + \ - id_task_mapping[t] + "\n" - predict_str += "nnUNet_predict -i FOLDER_WITH_TEST_CASES -o OUTPUT_FOLDER_MODEL2 -tr " + tr + " -ctr " + trc + " -m " + m2 + " -p " + pl + " -t " + \ - id_task_mapping[t] + "\n" - - if not disable_postprocessing: - predict_str += "nnUNet_ensemble -f OUTPUT_FOLDER_MODEL1 OUTPUT_FOLDER_MODEL2 -o OUTPUT_FOLDER -pp " + join(network_training_output_dir, "ensembles", id_task_mapping[t], k, "postprocessing.json") + "\n" - else: - predict_str += "nnUNet_ensemble -f OUTPUT_FOLDER_MODEL1 OUTPUT_FOLDER_MODEL2 -o OUTPUT_FOLDER\n" - else: - predict_str += "nnUNet_predict -i FOLDER_WITH_TEST_CASES -o OUTPUT_FOLDER_MODEL1 -tr " + tr + " -ctr " + trc + " -m " + k + " -p " + pl + " -t " + \ - id_task_mapping[t] + "\n" - print(predict_str) - - summary_folder = join(network_training_output_dir, "ensembles", id_task_mapping[t]) - maybe_mkdir_p(summary_folder) - with open(join(summary_folder, "prediction_commands.txt"), 'w') as f: - f.write(predict_str) - - num_classes = len([i for i in all_results[best_model].keys() if i != 'mean' and i != '0']) - with open(join(summary_folder, "summary.csv"), 'w') as f: - f.write("model") - for c in range(1, num_classes + 1): - f.write(",class%d" % c) - f.write(",average") - f.write("\n") - for m in all_results.keys(): - f.write(m) - for c in range(1, num_classes + 1): - f.write(",%01.4f" % all_results[m][str(c)]["Dice"]) - f.write(",%01.4f" % all_results[m]['mean']["Dice"]) - f.write("\n") - - -if __name__ == "__main__": - main() diff --git a/spaces/housexu123/bingo-2.0/src/components/chat-notification.tsx b/spaces/housexu123/bingo-2.0/src/components/chat-notification.tsx deleted file mode 100644 index 4be24d0f1755c8058698cfa66c736d8d4792475a..0000000000000000000000000000000000000000 --- a/spaces/housexu123/bingo-2.0/src/components/chat-notification.tsx +++ /dev/null @@ -1,77 +0,0 @@ -import { useEffect } from 'react' -import Image from 'next/image' - -import IconWarning from '@/assets/images/warning.svg' -import { ChatError, ErrorCode, ChatMessageModel } from '@/lib/bots/bing/types' -import { ExternalLink } from './external-link' -import { useBing } from '@/lib/hooks/use-bing' - -export interface ChatNotificationProps extends Pick, 'bot'> { - message?: ChatMessageModel -} - -function getAction(error: ChatError, reset: () => void) { - if (error.code === ErrorCode.THROTTLE_LIMIT) { - reset() - return ( -
    - You have reached the daily limit for sending messages; please switch to another account or try again a day later -
    - ) - } - if (error.code === ErrorCode.BING_FORBIDDEN) { - return ( - - Your account has been blacklisted; please try another account or request to have it unblocked - - ) - } - if (error.code === ErrorCode.CONVERSATION_LIMIT) { - return ( -
    - The current topic has been ended; please click - Restart - to open a new conversation -
    - ) - } - if (error.code === ErrorCode.BING_CAPTCHA) { - return ( - - Click here to pass the CAPTCHA verification - - ) - } - if (error.code === ErrorCode.BING_UNAUTHORIZED) { - reset() - return ( - No identity information was found or it has expired; click here to set it up again - ) - } - return error.message -}
    -
    -
    -
    -
    - error - {getAction(message.error, () => bot.resetConversation())} -
    -
    -
    -
    -
    - ) -} diff --git a/spaces/housexu123/bingo-2.0/src/components/chat-panel.tsx b/spaces/housexu123/bingo-2.0/src/components/chat-panel.tsx deleted file mode 100644 index 1fbc3c2bf05b914e0c229661832fbb560745f488..0000000000000000000000000000000000000000 --- a/spaces/housexu123/bingo-2.0/src/components/chat-panel.tsx +++ /dev/null @@ -1,153 +0,0 @@ -'use client' - -import * as React from 'react' -import Image from 'next/image' -import Textarea from 'react-textarea-autosize' -import { useAtomValue } from 'jotai' -import { useEnterSubmit } from '@/lib/hooks/use-enter-submit' -import { cn } from '@/lib/utils' - -import BrushIcon from '@/assets/images/brush.svg' -import ChatIcon from '@/assets/images/chat.svg' -import VisualSearchIcon from '@/assets/images/visual-search.svg' -import SendIcon from '@/assets/images/send.svg' -import PinIcon from '@/assets/images/pin.svg' -import PinFillIcon from '@/assets/images/pin-fill.svg' - -import { useBing } from '@/lib/hooks/use-bing' -import { voiceListenAtom } from '@/state' -import Voice from './voice' -import { ChatImage } from './chat-image' -import { ChatAttachments } from './chat-attachments' - -export interface ChatPanelProps - extends Pick< - ReturnType, - | 'generating' - | 'input' - | 'setInput' - | 'sendMessage' - | 'resetConversation' - | 'isSpeaking' - | 'attachmentList' - | 'uploadImage' - | 'setAttachmentList' - > { - id?: string - className?: string -} - -export function ChatPanel({ - isSpeaking, - generating, - input, - setInput, - className, - sendMessage, - resetConversation, - attachmentList, - uploadImage, - setAttachmentList -}: ChatPanelProps) { - const inputRef = React.useRef(null) - const {formRef, onKeyDown} = useEnterSubmit() - const [focused, setFocused] = React.useState(false) - const [active, setActive] = React.useState(false) - const [pin, setPin] = React.useState(false) - const [tid, setTid] = React.useState() - const voiceListening = useAtomValue(voiceListenAtom) - - const setBlur = React.useCallback(() => { - clearTimeout(tid) - setActive(false) - const _tid = setTimeout(() => setFocused(false), 2000); - setTid(_tid) - }, [tid]) - - const setFocus = React.useCallback(() => { - setFocused(true) - setActive(true) - clearTimeout(tid) - inputRef.current?.focus() - }, [tid]) - - React.useEffect(() => { - if (input) { - setFocus() - } - }, [input]) - - return ( -
    { - e.preventDefault() - if (generating) { - return; - } - if (!input?.trim()) { - return - } - setInput('') - setPin(false) - await sendMessage(input) - }} - ref={formRef} - > -
    -
    -
    -
    -
    -
    -
    - -
    -
    -
    -
    - chat -