diff --git a/spaces/17TheWord/RealESRGAN/setup.py b/spaces/17TheWord/RealESRGAN/setup.py deleted file mode 100644 index c2b92e31d2db1aba50767f4f844540cfd53c609d..0000000000000000000000000000000000000000 --- a/spaces/17TheWord/RealESRGAN/setup.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python - -from setuptools import find_packages, setup - -import os -import subprocess -import time - -version_file = 'realesrgan/version.py' - - -def readme(): - with open('README.md', encoding='utf-8') as f: - content = f.read() - return content - - -def get_git_hash(): - - def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH', 'HOME']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] - return out - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - sha = out.strip().decode('ascii') - except OSError: - sha = 'unknown' - - return sha - - -def get_hash(): - if os.path.exists('.git'): - sha = get_git_hash()[:7] - else: - sha = 'unknown' - - return sha - - -def write_version_py(): - content = """# GENERATED VERSION FILE -# TIME: {} -__version__ = '{}' -__gitsha__ = '{}' -version_info = ({}) -""" - sha = get_hash() - with open('VERSION', 'r') as f: - SHORT_VERSION = f.read().strip() - VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')]) - - version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO) - with open(version_file, 'w') as f: - f.write(version_file_str) - - -def get_version(): - with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) - return locals()['__version__'] - - -def get_requirements(filename='requirements.txt'): - here = os.path.dirname(os.path.realpath(__file__)) - with open(os.path.join(here, filename), 'r') as f: - requires = [line.replace('\n', '') for line in f.readlines()] - return requires - - -if __name__ == '__main__': - write_version_py() - setup( - name='realesrgan', - version=get_version(), - description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration', - long_description=readme(), - long_description_content_type='text/markdown', - author='Xintao Wang', - author_email='xintao.wang@outlook.com', - keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan', - url='https://github.com/xinntao/Real-ESRGAN', - include_package_data=True, - packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')), - classifiers=[ - 'Development Status :: 4 - Beta', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - ], - license='BSD-3-Clause License', - setup_requires=['cython', 'numpy'], - install_requires=get_requirements(), - zip_safe=False) diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/All Autodesk 2018 Products Crack Keygen (x86x64) !Latest Utorrent HOT.md b/spaces/1gistliPinn/ChatGPT4/Examples/All Autodesk 2018 Products Crack Keygen (x86x64) !Latest Utorrent HOT.md deleted file mode 100644 index 60d27358ed11d75e99d0a4c0eb6ddfc5b3b44d67..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/All Autodesk 2018 Products Crack Keygen (x86x64) 
!Latest Utorrent HOT.md +++ /dev/null @@ -1,6 +0,0 @@ -

All Autodesk 2018 Products Crack Keygen (x86x64) !{Latest} Utorrent


Download Zip ★★★ https://imgfil.com/2uxXDX



-
-Autodesk 3ds max 2011 32 bit xforce keygen free. ... sketchbook pro 2011 serial number and product key download on autodesk ... 12 Jan 2015 8 free download full version with crack xforce keygen 64 bit microsoft visual 3. ... Latest Utorrent. All AutoCAD For Mac 2018 Products Crack Keygen (x86x64) !{
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Driver Booster Pro Full Version.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Driver Booster Pro Full Version.md deleted file mode 100644 index 6901462e62647e431e863470d5e797bcce1d9d11..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Driver Booster Pro Full Version.md +++ /dev/null @@ -1,21 +0,0 @@ -
-

How to Download and Install Driver Booster Pro Full Version for Windows

-

Driver Booster Pro is a powerful and easy-to-use tool that helps you update your device drivers automatically with just one click. It can scan and identify outdated, faulty, missing, and game-ready drivers from a large cloud database of over 2.5 million drivers, and download and install them at an unrivaled speed. It also offers backup and restore features to keep your drivers safe and secure.

-

Download driver booster pro full version


Download Zip https://imgfil.com/2uxZpp



-

If you want to enjoy the full benefits of Driver Booster Pro, such as priority updates, larger driver database, faster download speed, driver tweak for better gaming performance, and 24/7 technical support, you need to purchase a license code from the official website or use one of the free license codes provided by some online sources[^5^]. In this article, we will show you how to download and install Driver Booster Pro full version for Windows using a free license code.

-

Step 1: Download Driver Booster Pro

-

You can download Driver Booster Pro from the official website[^3^] or from other trusted sources such as FileCR[^1^] or 4DOWNLOAD[^2^]. The installation file is about 30 MB in size and supports Windows 11/10/8.1/8/7/Vista/XP. Here we will use the FileCR link as an example.

-

Go to https://filecr.com/windows/iobit-driver-booster/ and click on the "Download Now" button. You will be redirected to another page where you can choose a server to download the file. Click on any server and wait for the download to start. You may need to enter a password to extract the file. The password is www.filecr.com.
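If the archive is password-protected, you can also extract it programmatically. Below is a minimal sketch using Python's standard zipfile module; the archive file name is a hypothetical placeholder, and the password is the one quoted above. Note that zipfile only handles legacy ZipCrypto encryption, so AES-encrypted or RAR archives would need other tools.

```python
import zipfile

ARCHIVE = "driver_booster_pro.zip"   # hypothetical file name
PASSWORD = b"www.filecr.com"         # password quoted above, as bytes

with zipfile.ZipFile(ARCHIVE) as zf:
    # pwd only works for legacy ZipCrypto archives; AES or RAR
    # archives require third-party tools instead.
    zf.extractall(path="extracted", pwd=PASSWORD)

print("Extracted", ARCHIVE, "to ./extracted")
```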

-

Step 2: Install Driver Booster Pro

-

After downloading the file, double-click on it to run the setup wizard. Follow the on-screen instructions to complete the installation process. You can choose your preferred language, destination folder, and additional tasks such as creating a desktop icon or adding Driver Booster to the system tray.

-

-

When the installation is finished, click on "Finish" to launch Driver Booster Pro. You will see the main interface of the program, which shows your current driver status and a big "Scan" button.

-

Step 3: Activate Driver Booster Pro

-

To activate Driver Booster Pro full version, you need a valid license code. You can buy one from the official website[^3^] or use one of the free license codes provided by some online sources[^5^]. Here we will use a free license code from nsaneforums.com as an example.

-

Go to https://nsaneforums.com/topic/438923-expired-iobit-driver-booster-pro-v1030-free-1-year-license-code-full-version/ and copy one of the license codes from the list. Make sure you choose a code that is not expired or used by someone else.

-

Go back to Driver Booster Pro and click on the "Enter License" button at the bottom right corner of the interface. Paste the license code into the text box and click on "Activate Now". You will see a message that says "Congratulations! You have successfully activated Driver Booster PRO!"

-

Step 4: Update Your Drivers

-

Now that you have activated Driver Booster Pro full version, you can enjoy all its features and benefits. To update your drivers, simply click on the "Scan" button and wait for Driver Booster Pro to scan your system and find any outdated, faulty, missing, or game-ready drivers. You can see the details of each driver such as its name, version, date, size, status, and source.

-

To update all your drivers at once, click on the "Update Now" button at the top right corner of the interface. You can also choose to update specific drivers by clicking on the "Update" button next to each driver. Driver Booster Pro will download and install the drivers for you automatically.

-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anime X 1.1.9 APK How to Download and Use It on Your Android Device.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anime X 1.1.9 APK How to Download and Use It on Your Android Device.md deleted file mode 100644 index 53f153f630e5859d970941380bfc477a61332a1e..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anime X 1.1.9 APK How to Download and Use It on Your Android Device.md +++ /dev/null @@ -1,106 +0,0 @@ -
-

Anime X APK 1.1.9 Download: Watch Free Anime Online

-

If you are an anime fan, you might be looking for a way to watch your favorite shows and movies online for free. There are many streaming platforms that offer anime content, but they often require subscriptions or have limited selections. That's why you might want to try Anime X, a free app that lets you watch thousands of anime episodes and movies on your Android device.

-

What is Anime X?

-

Anime X is an app that allows you to stream and download anime videos from various sources, such as Gogoanime, KissAnime, 9Anime, and more. You can choose from a wide range of genres, such as action, comedy, romance, horror, sci-fi, and more. You can also search for specific titles or browse by popularity, ratings, or latest updates.

-

anime x apk 1.1.9 download


Download File https://urlin.us/2uSUCS



-

Features of Anime X

-

Some of the features that make Anime X stand out from other anime apps are:

- A huge collection of anime videos from various sources and genres
- A high-quality video player that supports subtitles
- The option to download videos for offline viewing or share them with friends
- A simple and user-friendly interface
- A notification system that alerts you of new updates

How to download and install Anime X APK 1.1.9

-

To download and install Anime X APK 1.1.9 on your Android device, you need to follow these steps:

-
    -
  1. Go to the official website of Anime X or click on this link to download the APK file.
  2. Once the download is complete, open the file manager on your device and locate the APK file.
  3. Tap on the file and allow the installation from unknown sources if prompted (a computer-based alternative using adb is sketched after this list).
  4. Wait for the installation to finish and then launch the app from your app drawer or home screen.
  5. Enjoy watching free anime online with Anime X!
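Alternatively, if you prefer to install from a computer, you can sideload the APK with adb. The following is a minimal sketch, assuming adb is on your PATH and USB debugging is enabled on the device; the APK file name is a hypothetical placeholder.

```python
import subprocess

APK_PATH = "animex-1.1.9.apk"  # hypothetical file name

# Requires adb on PATH and USB debugging enabled on the phone.
result = subprocess.run(
    ["adb", "install", "-r", APK_PATH],  # -r replaces an existing install
    capture_output=True, text=True, check=False,
)
print(result.stdout or result.stderr)
```

The same command works from any shell; the Python wrapper just makes it easy to script several devices in a row.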
-

Why use Anime X?

-

Anime X is one of the best apps for anime lovers who want to watch their favorite shows and movies online for free. It has many advantages over other streaming platforms, such as:

-

Pros and cons of Anime X

- - - - - - - -
ProsCons
It offers a huge collection of anime videos from various sources and genres.It may not have some of the latest or obscure titles that are available on other platforms.
It has a high-quality video player that supports subtitles and other features.It may encounter some buffering or loading issues depending on your internet connection or server availability.
It allows you to download videos for offline viewing or share them with your friends.It may consume a lot of storage space or data usage if you download or stream too many videos.
It has a simple and user-friendly interface that makes it easy to use.It may contain some ads or pop-ups that can be annoying or intrusive.
It is free and does not require any registration or subscription.It may not be legal or safe in some countries or regions where anime piracy is prohibited or regulated.
-

Alternatives to Anime X

-

If you are not satisfied with Anime X or want to try some other apps that offer similar features, you can check out these alternatives:

-

anime x app apk 1.1.9 free download
-anime x streaming apk 1.1.9 latest version
-anime x mod apk 1.1.9 unlocked features
-anime x pro apk 1.1.9 premium access
-anime x apk 1.1.9 download for android
-anime x apk 1.1.9 download for pc
-anime x apk 1.1.9 download for ios
-anime x apk 1.1.9 download for firestick
-anime x apk 1.1.9 download for smart tv
-anime x apk 1.1.9 download for windows 10
-anime x apk 1.1.9 download no ads
-anime x apk 1.1.9 download offline mode
-anime x apk 1.1.9 download with subtitles
-anime x apk 1.1.9 download high quality
-anime x apk 1.1.9 download fast speed
-anime x apk 1.1.9 download filehippo.com[^4^]
-anime x apk 1.1.9 download gitlab.com[^2^] [^3^]
-anime x apk 1.1.9 download apkpure.com
-anime x apk 1.1.9 download uptodown.com
-anime x apk 1.1.9 download malavida.com
-anime x apk 1.1.9 download softonic.com
-anime x apk 1.1.9 download androidapksfree.com
-anime x apk 1.1.9 download apkmirror.com
-anime x apk 1.1.9 download apkmody.io
-anime x apk 1.1.9 download happymod.com
-how to install anime x apk 1.1.9 on android
-how to install anime x apk 1.1.9 on pc
-how to install anime x apk 1.1.9 on ios
-how to install anime x apk 1.1.9 on firestick
-how to install anime x apk 1.1.9 on smart tv
-how to install anime x apk 1.1.9 on windows 10
-how to update anime x apk to version 1.2 or higher
-how to fix anime x apk not working or crashing issues
-how to uninstall or delete anime x apk from device
-how to use anime x apk to watch and download anime online
-what is new in anime x apk version 1.2 or higher
-what are the features and benefits of anime x apk version 1.x.x
-what are the requirements and compatibility of anime x apk version 2.x.x
-what are the alternatives and competitors of anime x apk in the market
-what are the reviews and ratings of anime x apk by users and experts
-is anime x apk safe and secure to use and download
-is anime x apk legal and ethical to use and download
-is anime x apk free or paid to use and download
-is anime x apk original or modified to use and download
-is anime x apk available or banned in my country or region

- -

Conclusion

-

Anime X is one of the best apps for anime fans who want to watch free anime online on their Android devices. It has many features that make it stand out from other streaming platforms, such as a huge collection of anime videos, a high-quality video player, a download and share option, a simple and user-friendly interface, and a notification system. However, it also has some drawbacks, such as limited or outdated titles, buffering or loading issues, storage or data consumption, ads or pop-ups, and legal or safety concerns. Therefore, you should use it at your own risk and discretion. Alternatively, you can try some other apps that offer similar features, such as AnimeFlix, AnimeGlare, or AnimeZone.

-

FAQs

-

Here are some of the frequently asked questions about Anime X:

-
    -
  1. Is Anime X safe to use?

     Anime X is not available on the Google Play Store or any other official app store. Therefore, you need to download it from third-party sources that may not be secure or reliable. Moreover, Anime X may contain some ads or pop-ups that can redirect you to malicious or inappropriate websites. Furthermore, Anime X may not be legal or safe in some countries or regions where anime piracy is prohibited or regulated. Therefore, you should use it at your own risk and discretion.

  2. Does Anime X require any permissions?

     Anime X requires some permissions to function properly on your device. These include access to your storage (to download videos), access to your network (to stream videos), access to your device settings (to change screen orientation), and access to your notifications (to alert you of new updates). You can grant or deny these permissions as per your preference.

  3. How can I update Anime X?

     Anime X does not have an auto-update feature. Therefore, you need to check for updates manually on the official website of Anime X or in the app itself. If there is a new version available, you can download and install it following the same steps as mentioned above.

  4. How can I contact the developers of Anime X?

     If you have any questions, suggestions, feedback, or complaints about Anime X, you can contact the developers via email at animexapp@gmail.com or via their Facebook page. They will try to respond to you as soon as possible.

  5. How can I support the developers of Anime X?

     If you like Anime X and want to support the developers for their hard work and dedication, you can donate to them via PayPal or Patreon. You can also rate and review the app on their website or on their Facebook page, and you can share the app with your friends and family who love anime.

    -

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Caves (Roguelike) APK A Mod Menu for Every Play Style and Preference.md b/spaces/1phancelerku/anime-remove-background/Caves (Roguelike) APK A Mod Menu for Every Play Style and Preference.md deleted file mode 100644 index ee07bd03372ebe702ecb94b8131ae705c0eb080a..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Caves (Roguelike) APK A Mod Menu for Every Play Style and Preference.md +++ /dev/null @@ -1,148 +0,0 @@ -
-

What is Caves (Roguelike)?

-

Caves (Roguelike) is a pixel-art dungeon crawler game that challenges you to explore randomly generated underground levels, fight monsters, collect loot, and upgrade your character. The game is inspired by classic roguelike games such as Rogue, Nethack, and Dungeon Crawl Stone Soup, which means that every run is different and death is permanent. You can choose from various classes, skills, and items to customize your playstyle and strategy.

-

Why play Caves (Roguelike)?

-

If you are looking for a game that offers a high level of replayability, difficulty, and variety, then Caves (Roguelike) is a great choice. Here are some reasons why you should play this game:

-

caves (roguelike mod menu apk)


Download File »»» https://jinyurl.com/2uNSOB



- -

How to play Caves (Roguelike)?

-

The basic gameplay of Caves (Roguelike) is similar to other roguelike games. You start by choosing a class from the available options, such as warrior, mage, rogue, or priest. Each class has its own strengths, weaknesses, and skills. Then, you enter the dungeon and explore each floor by moving around with the arrow keys or tapping on the screen. You can interact with objects, such as chests, doors, switches, or stairs, by pressing the spacebar or tapping on them. You can also use items from your inventory by pressing the I key or tapping on the backpack icon.

You can fight enemies by moving into them or using skills from your skill bar by pressing the number keys or tapping on the skill icons. You can also use potions or scrolls from your quick slots by pressing the Q or E keys or tapping on the potion or scroll icons. You can also access the game menu by pressing the ESC key or tapping on the menu icon. The game menu allows you to save, load, quit, or change the game settings.

Your goal is to reach the deepest level of the dungeon and defeat the final boss. Along the way, you will find various items, such as weapons, armor, rings, amulets, or artifacts, that can improve your stats and abilities. You will also gain experience points and level up by killing enemies, which will allow you to increase your attributes and skills. However, you will also face many dangers, such as traps, curses, diseases, or hunger, that can hinder your progress and end your run. You have to be careful and smart to survive and succeed in Caves (Roguelike).

-

Tips and tricks for Caves (Roguelike)

-

Caves (Roguelike) is a challenging game that requires a lot of trial and error and learning from your mistakes. Here are some tips and tricks that can help you improve your performance and enjoyment of the game:

- -

What is a mod menu apk?

-

A mod menu apk is a modified version of an original game application that allows you to access various cheats, hacks, or features that are not available in the official game. A mod menu apk usually has a menu interface that lets you toggle on or off different options and settings that can alter the gameplay in various ways. For example, you can enable unlimited money, health, ammo, or resources; unlock all levels, items, or characters; activate god mode, speed hack, or wall hack; or customize the graphics, sound, or controls of the game. A mod menu apk can enhance your gaming experience by making it more fun, easy, or interesting.

-

caves roguelike mod apk unlimited skills
-caves roguelike mod menu apk download
-caves roguelike mod apk latest version
-caves roguelike mod menu apk free
-caves roguelike mod apk android
-caves roguelike mod menu apk 2023
-caves roguelike mod apk no root
-caves roguelike mod menu apk offline
-caves roguelike mod apk unlimited money
-caves roguelike mod menu apk hack
-caves roguelike mod apk 0.95.2.41
-caves roguelike mod menu apk online
-caves roguelike mod apk unlimited health
-caves roguelike mod menu apk 36dev
-caves roguelike mod apk premium
-caves roguelike mod menu apk cheats
-caves roguelike mod apk unlocked
-caves roguelike mod menu apk update
-caves roguelike mod apk pro
-caves roguelike mod menu apk review
-caves roguelike mod apk full
-caves roguelike mod menu apk cracked
-caves roguelike mod apk paid
-caves roguelike mod menu apk reddit
-caves roguelike mod apk mega
-caves roguelike mod menu apk mediafire
-caves roguelike mod apk vip
-caves roguelike mod menu apk tutorial
-caves roguelike mod apk obb
-caves roguelike mod menu apk gameplay

-

Benefits of using a mod menu apk

-

Using a mod menu apk can have many benefits for players who want to enjoy their games in different ways. Here are some of the advantages that a mod menu apk can offer:

- -

Risks of using a mod menu apk

-

However, using a mod menu apk can also have some drawbacks and dangers for players who are not careful or aware of the consequences. Here are some of the risks that a mod menu apk can pose:

- -

How to download and install Caves (Roguelike) mod menu apk?

-

If you want to try out Caves (Roguelike) mod menu apk for yourself, you will need to follow some simple steps to download and install it on your device. Here is a step-by-step guide on how to do it:

-
    -
  1. Find a reliable and safe source for Caves (Roguelike) mod menu apk. You can use one of the websites listed below in this article.
  2. Download the mod menu apk file from the website to your device. Make sure you have enough storage space and a stable internet connection (an optional integrity check is sketched after this list).
  3. Enable the installation of unknown sources on your device. You can do this by going to your device settings, security, and allowing unknown sources.
  4. Locate the downloaded mod menu apk file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.
  5. Launch the game from your device and enjoy the mod menu apk features. You can access the mod menu by tapping on the icon on the top left corner of the screen. You can also adjust the mod settings from the game menu.
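Optionally, before tapping the file in step 4, you can check that the download was not corrupted or tampered with. Below is a minimal Python sketch that computes a file's SHA-256 digest; the file name and expected checksum are placeholders, and the comparison is only meaningful if the download site publishes an official checksum.

```python
import hashlib

APK_PATH = "caves_mod_menu.apk"  # hypothetical file name
EXPECTED = "0123abcd..."         # checksum published by the site, if any

sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    # Read in 1 MB chunks so large files do not need to fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

digest = sha256.hexdigest()
print("SHA-256:", digest)
print("Match" if digest == EXPECTED else "Mismatch - do not install")
```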
-

Requirements for Caves (Roguelike) mod menu apk

-

Before you download and install Caves (Roguelike) mod menu apk, you should make sure that your device meets the minimum and recommended specifications for the game. Here are the requirements for Caves (Roguelike) mod menu apk:

- - - - - - - - - - - - - - - - - - - - - -
MinimumRecommended
Android 4.1 or higherAndroid 6.0 or higher
1 GB of RAM2 GB of RAM or more
100 MB of free storage space200 MB of free storage space or more
A stable internet connectionA fast and reliable internet connection
-

Sources for Caves (Roguelike) mod menu apk

-

There are many websites that offer Caves (Roguelike) mod menu apk for download, but not all of them are trustworthy or safe. Some of them may contain viruses, malware, or fake files that can harm your device or data. Therefore, you should be careful and selective when choosing a source for Caves (Roguelike) mod menu apk. Here are some of the reliable and safe websites where you can download Caves (Roguelike) mod menu apk:

- -

What features does Caves (Roguelike) mod menu apk have?

-

Caves (Roguelike) mod menu apk has many features that can enhance your gaming experience and make it more fun, easy, or interesting. Here are some of the features that Caves (Roguelike) mod menu apk provides:

-

Unlimited skills

-

This feature allows you to use any skill without cooldown or cost. This means that you can spam your skills as much as you want without worrying about running out of mana or waiting for them to recharge. This can give you an edge in combat and help you defeat enemies faster and easier.

-

How to activate unlimited skills

-

To activate unlimited skills, you need to go to the mod menu by tapping on the icon on the top left corner of the screen. Then, you need to toggle on the option that says "Unlimited Skills". You will see a green check mark next to it when it is enabled. You can also toggle it off by tapping on it again.

-

God mode

-

This feature makes you invincible and immune to damage. This means that you can survive any attack or trap without losing any health or dying. This can make you unstoppable and fearless in exploring the dungeon and facing any enemy or boss.

-

How to activate god mode

-

To activate god mode, you need to go to the mod menu by tapping on the icon on the top left corner of the screen. Then, you need to toggle on the option that says "God Mode". You will see a green check mark next to it when it is enabled. You can also toggle it off by tapping on it again.

-

Conclusion

-

Caves (Roguelike) is a pixel-art dungeon crawler game that offers a high level of replayability, difficulty, and variety. You can choose from various classes, skills, and items to customize your playstyle and strategy. You can also use a mod menu apk to access various cheats, hacks, or features that can alter the gameplay in various ways. However, you should also be aware of the risks and consequences of using a mod menu apk and use it responsibly and ethically. If you want to download and install Caves (Roguelike) mod menu apk, you can follow the steps and sources provided in this article. Have fun and enjoy the game!

-

FAQs

-

Here are some frequently asked questions and their answers about Caves (Roguelike) and its mod menu apk:

-
    -
  1. Q: Is Caves (Roguelike) free to play?
     A: Yes, Caves (Roguelike) is free to play and download from the Google Play Store or other official sources. However, the game may contain ads or in-app purchases that require real money.
  2. Q: Is Caves (Roguelike) mod menu apk safe to use?
     A: It depends on the source and quality of the mod menu apk. Some mod menu apks may contain viruses, malware, or fake files that can harm your device or data. Therefore, you should only download and install mod menu apks from reliable and safe websites that have positive ratings and reviews from other users.
  3. Q: Is Caves (Roguelike) mod menu apk legal to use?
     A: It depends on the laws and regulations of your country or region. Some countries or regions may prohibit or restrict the use of mod menu apks or other forms of game modification or cheating. Therefore, you should check the terms of service and policies of the game developers or publishers and the laws and regulations of your country or region before using a mod menu apk.
  4. Q: How can I update Caves (Roguelike) mod menu apk?
     A: You can update Caves (Roguelike) mod menu apk by downloading and installing the latest version of the mod menu apk from the same source that you used before. However, you should also back up your game data before updating to avoid losing your progress or settings.
  5. Q: How can I uninstall Caves (Roguelike) mod menu apk?
     A: You can uninstall Caves (Roguelike) mod menu apk by deleting the mod menu apk file from your device or by using an uninstaller app that can remove all traces of the mod menu apk from your device. However, you should also back up your game data before uninstalling to avoid losing your progress or settings.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Experience the Legendary Stick War with MOD APK Features.md b/spaces/1phancelerku/anime-remove-background/Experience the Legendary Stick War with MOD APK Features.md deleted file mode 100644 index 9897ca2c5a94fb02820d9d9125966f6b5f201d89..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Experience the Legendary Stick War with MOD APK Features.md +++ /dev/null @@ -1,93 +0,0 @@ - -

Stick War: Legacy MOD APK - The Ultimate Strategy Game for Android

-

If you are a fan of strategy games, you might have heard of Stick War: Legacy, one of the most popular and addictive web games ever. Now, you can enjoy this game on your Android device with Stick War: Legacy MOD APK, a modified version that gives you unlimited gems, unlocked skins and weapons, and no ads. In this article, we will tell you everything you need to know about Stick War: Legacy MOD APK, including its features, how to download and install it, and some tips and tricks for playing it.

-

stickman war mod apk


Download Zip https://jinyurl.com/2uNRD2



-

Introduction

-

Stick War: Legacy is a strategy game that puts you in charge of an army of stickmen who are fighting against other stickmen nations. You can control each and every character in your army, from miners who gather resources, to swordsmen who slash enemies, to archers who shoot arrows from afar. You can also use spells and special abilities to turn the tide of the battle. Your goal is to conquer all the territories on the map and become the ultimate stickman leader.

-

What is Stick War: Legacy?

-

Stick War: Legacy is the official mobile version of the original web game, Stick War. It was developed by Max Games Studios and released in 2016. It has been downloaded over 100 million times on Google Play Store and has an average rating of 4.5 out of 5 stars. It features several game modes, such as Campaign, Endless Deads, Tournament, and Sandbox. It also has different difficulty levels, from Normal to Insane. You can play Stick War: Legacy for free, but you will have to watch ads and earn gems slowly.

-

What is Stick War: Legacy MOD APK?

-

Stick War: Legacy MOD APK is a modified version of the original game that gives you some advantages that make the game more fun and easy. With Stick War: Legacy MOD APK, you will get unlimited gems, which are the main currency in the game. You can use gems to buy skins and weapons for your units, upgrade your spells and abilities, and unlock new game modes. You will also get all the skins and weapons unlocked from the start, so you can customize your army as you wish. Moreover, you will not see any ads in the game, which can be annoying and distracting.

-

Features of Stick War: Legacy MOD APK

-

Here are some of the main features of Stick War: Legacy MOD APK that make it worth downloading:

-

Unlimited Gems

-

Gems are essential in Stick War: Legacy, as they allow you to buy and upgrade various items in the game. However, earning gems in the original game can be slow and tedious, as you have to complete missions, watch ads, or spend real money. With Stick War: Legacy MOD APK, you will get unlimited gems from the start, so you can buy anything you want without worrying about running out.

-

stickman war legacy mod apk unlimited gems
-stickman war 2 mod apk download
-stickman war zombie mod apk
-stickman war legacy mod apk latest version
-stickman war hack mod apk
-stickman war legacy mod apk android 1
-stickman war shadow mod apk
-stickman war legacy mod apk revdl
-stickman war legacy mod apk happymod
-stickman war legacy mod apk unlimited money
-stickman war legacy 2 mod apk
-stickman war draw mod apk
-stickman war legacy mod apk rexdl
-stickman war legacy mod apk 2023.2.85
-stickman war legacy mod apk offline
-stickman war legacy mod apk free shopping
-stickman war world mod apk
-stickman war legacy mod apk no ads
-stickman war legacy mod apk unlimited everything
-stickman war legacy mod apk online
-stickman war empire mod apk
-stickman war legacy mod apk unlimited health
-stickman war of clans mod apk
-stickman war legacy 3d mod apk
-stickman war legacy mod apk all unlocked
-stickman war heroes mod apk
-stickman war strategy mod apk
-stickman war legacy mega mod apk
-stickman war simulator mod apk
-stickman war legacy hack mod apk download
-stickman war z mod apk unlimited money and gems
-stickman war battlegrounds mod apk
-stickman war legacy god mode mod apk
-stickman war castle defense mod apk
-stickman war legacy cheat mod apk
-stickman war super dragon legend mod apk
-stickman war ultimate challenge mod apk
-stickman war legacy infinite gems mod apk
-stickman war army vs zombies mod apk
-stickman war shadow warrior legend mod apk

-

Unlocked Skins and Weapons

-

Skins and weapons are cosmetic items that change the appearance of your units. They do not affect their performance or stats, but they can make your army look more cool and unique. In the original game, you have to buy skins and weapons with gems or unlock them by playing certain game modes. With Stick War: Legacy MOD APK, you will get all the skins and weapons unlocked from the start, so you can choose your favorite ones without spending any gems.

-

No Ads

-

Ads are one of the most annoying things in any game, especially when they interrupt your gameplay or force you to watch them to get rewards. In the original game, you have to watch ads to get extra gems, unlock game modes, or revive your units. With Stick War: Legacy MOD APK, you will not see any ads in the game, which will make your gaming experience more smooth and enjoyable.

-

How to Download and Install Stick War: Legacy MOD APK

-

If you are interested in downloading and installing Stick War: Legacy MOD APK, you can follow these simple steps:

-

Step 1: Enable Unknown Sources

-

Before you can install any APK file on your Android device, you have to enable the option of unknown sources, which allows you to install apps from sources other than Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on.

-

Step 2: Download the APK File

-

Next, you have to download the APK file of Stick War: Legacy MOD APK from a reliable source. You can use the link below to download it directly to your device. The file size is about 100 MB, so make sure you have enough storage space and a stable internet connection.
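If you want to confirm programmatically that you have enough free space before downloading, a simple sketch with Python's standard shutil module looks like the following; the 100 MB figure comes from the paragraph above, and the download directory is a hypothetical placeholder.

```python
import shutil

REQUIRED_MB = 100          # approximate file size quoted above
DOWNLOAD_DIR = "/sdcard"   # hypothetical download directory

# disk_usage returns total/used/free in bytes for the given path.
free_mb = shutil.disk_usage(DOWNLOAD_DIR).free // (1024 * 1024)
if free_mb < REQUIRED_MB:
    print(f"Only {free_mb} MB free - clear some space first.")
else:
    print(f"{free_mb} MB free - enough room for the download.")
```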

-

Download Stick War: Legacy MOD APK

-

Step 3: Install the APK File

-

Once you have downloaded the APK file, you can install it by tapping on it and following the instructions on the screen. The installation process should take a few seconds, and then you will see the icon of Stick War: Legacy MOD APK on your home screen or app drawer.

-

Step 4: Enjoy the Game

-

Now, you can launch the game and enjoy all the features of Stick War: Legacy MOD APK. You will see that you have unlimited gems, unlocked skins and weapons, and no ads. You can start playing the game mode of your choice and conquer all the stickman nations.

-

Tips and Tricks for Playing Stick War: Legacy MOD APK

-

Stick War: Legacy MOD APK is a fun and easy game to play, but it can also be challenging and strategic at times. Here are some tips and tricks that can help you improve your skills and win more battles:

-

Choose Your Strategy Wisely

-

In Stick War: Legacy MOD APK, you can choose from different strategies to lead your army. You can either be aggressive and attack your enemies head-on, or defensive and build up your defenses and resources. You can also be balanced and mix both approaches. Each strategy has its pros and cons, so you have to consider the situation and the enemy before deciding. For example, if you are facing a strong enemy with powerful units, you might want to be defensive and wait for an opening. On the other hand, if you are facing a weak enemy with few units, you might want to be aggressive and finish them off quickly.

-

Upgrade Your Units and Spells

-

In Stick War: Legacy MOD APK, you can upgrade your units and spells with gems. Upgrading your units will increase their health, damage, speed, and range. Upgrading your spells will increase their power, duration, and cooldown. Upgrading is essential if you want to keep up with the increasing difficulty of the game. You should upgrade your units and spells regularly and evenly, so that they are all effective and useful in different situations.

-

Use Your Special Abilities

-

In Stick War: Legacy MOD APK, you can use special abilities that can give you an edge in battle. These abilities include summoning giants, controlling a single unit, casting spells, or using items. Each ability has a different effect and cost, so you have to use them wisely and sparingly. You should use your abilities when they are most needed or when they can make a big difference in the outcome of the battle. For example, you can use the giant ability to break through enemy defenses or crush their units. You can use the control ability to take over an enemy unit or a powerful unit of your own. You can use the spell ability to heal your units or damage your enemies. You can use the item ability to boost your units or hinder your enemies.

-

Conclusion

-

Stick War: Legacy MOD APK is a great game for anyone who loves strategy games and stickman games. It is fun, addictive, challenging, and rewarding. It has amazing graphics, sound effects, animations, and gameplay. It has various game modes, difficulty levels, skins, weapons, spells, abilities, and items. It has unlimited gems, unlocked skins and weapons, and no ads. It is easy to download and install, and easy to play. It is the ultimate strategy game for Android. If you are looking for a game that will keep you entertained for hours, you should definitely try Stick War: Legacy MOD APK. You will not regret it.

-

FAQs

-

Here are some of the frequently asked questions about Stick War: Legacy MOD APK:

-

Is Stick War: Legacy MOD APK safe to download and install?

-

Yes, Stick War: Legacy MOD APK is safe to download and install, as long as you use a reliable source and follow the instructions carefully. The APK file does not contain any viruses, malware, or spyware that can harm your device or compromise your privacy. However, you should always be careful when downloading and installing any APK file from unknown sources, as they might not be trustworthy or compatible with your device.

-

Is Stick War: Legacy MOD APK compatible with my device?

-

Stick War: Legacy MOD APK is compatible with most Android devices that run on Android 4.4 or higher. It does not require root access or any special permissions to work. However, some devices might not support the game or the mod features due to different specifications or settings. If you encounter any problems or errors while playing the game, you can try to update your device, clear your cache, or reinstall the game.

-

Can I play Stick War: Legacy MOD APK online or offline?

-

Stick War: Legacy MOD APK can be played both online and offline. You can play the game online if you want to access the leaderboards, achievements, or other online features. You can also play the game offline if you do not have an internet connection or if you want to save your data. However, some game modes or features might not be available offline, such as Tournament or Endless Deads.

-

Can I play Stick War: Legacy MOD APK with my friends?

-

Unfortunately, Stick War: Legacy MOD APK does not have a multiplayer mode or a co-op mode that allows you to play with your friends. The game is a single-player game that pits you against AI-controlled enemies. However, you can still compete with your friends by comparing your scores, achievements, or strategies on the leaderboards or social media.

-

Can I update Stick War: Legacy MOD APK?

-

Yes, you can update Stick War: Legacy MOD APK whenever there is a new version available. However, you have to download and install the new version manually from the same source that you used before. You cannot update the game from Google Play Store or any other app store, as they will not recognize the modded version of the game. You should also back up your game data before updating, as you might lose your progress or settings.

-
-
\ No newline at end of file diff --git a/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537227KB.py b/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537227KB.py deleted file mode 100644 index a1bb530e006482704f234c2e739a695174142941..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537227KB.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -import numpy as np -from torch import nn -import torch.nn.functional as F - -from . import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff 
--git a/spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/commons.py b/spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = 
input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/AIConsultant/MusicGen/audiocraft/grids/compression/_explorers.py b/spaces/AIConsultant/MusicGen/audiocraft/grids/compression/_explorers.py deleted file mode 100644 index eed30d5b8a1c14676503148ddf133c79ed2e33bf..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/grids/compression/_explorers.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import treetable as tt - -from .._base_explorers import BaseExplorer - - -class CompressionExplorer(BaseExplorer): - eval_metrics = ["sisnr", "visqol"] - - def stages(self): - return ["train", "valid", "evaluate"] - - def get_grid_meta(self): - """Returns the list of Meta information to display for each XP/job. - """ - return [ - tt.leaf("index", align=">"), - tt.leaf("name", wrap=140), - tt.leaf("state"), - tt.leaf("sig", align=">"), - ] - - def get_grid_metrics(self): - """Return the metrics that should be displayed in the tracking table. 
- """ - return [ - tt.group( - "train", - [ - tt.leaf("epoch"), - tt.leaf("bandwidth", ".2f"), - tt.leaf("adv", ".4f"), - tt.leaf("d_loss", ".4f"), - ], - align=">", - ), - tt.group( - "valid", - [ - tt.leaf("bandwidth", ".2f"), - tt.leaf("adv", ".4f"), - tt.leaf("msspec", ".4f"), - tt.leaf("sisnr", ".2f"), - ], - align=">", - ), - tt.group( - "evaluate", [tt.leaf(name, ".3f") for name in self.eval_metrics], align=">" - ), - ] diff --git a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Phind.py b/spaces/ASJMO/freegpt/g4f/Provider/Providers/Phind.py deleted file mode 100644 index 9fa8ec821f701d7841432e498a11ac9dd017978c..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Phind.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import json -import time -import subprocess - -from ...typing import sha256, Dict, get_type_hints - -url = 'https://phind.com' -model = ['gpt-4'] -supports_stream = True - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - - path = os.path.dirname(os.path.realpath(__file__)) - config = json.dumps({ - 'model': model, - 'messages': messages}, separators=(',', ':')) - - cmd = ['python', f'{path}/helpers/phind.py', config] - - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - for line in iter(p.stdout.readline, b''): - if b'Just a moment...' in line: - os.system('clear' if os.name == 'posix' else 'cls') - yield 'Clouflare error, please try again...' - os._exit(0) - - else: - if b'ping - 2023-' in line: - continue - - yield line.decode('cp1251') #[:-1] - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/util.py b/spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/util.py deleted file mode 100644 index 29724d52a3863cb307945b7170e16b32a59609ae..0000000000000000000000000000000000000000 --- a/spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/util.py +++ /dev/null @@ -1,203 +0,0 @@ -import math - -import cv2 -import matplotlib -import numpy as np - - -def padRightDownCorner(img, stride, padValue): - h = img.shape[0] - w = img.shape[1] - - pad = 4 * [None] - pad[0] = 0 # up - pad[1] = 0 # left - pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down - pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right - - img_padded = img - pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1)) - img_padded = np.concatenate((pad_up, img_padded), axis=0) - pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1)) - img_padded = np.concatenate((pad_left, img_padded), axis=1) - pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1)) - img_padded = np.concatenate((img_padded, pad_down), axis=0) - pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1)) - img_padded = np.concatenate((img_padded, pad_right), axis=1) - - return img_padded, pad - - -# transfer caffe model to pytorch which will match the layer name -def transfer(model, model_weights): - transfered_model_weights = {} - for weights_name in model.state_dict().keys(): - transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])] - return transfered_model_weights - - -# draw the body keypoint and 
lims -def draw_bodypose(canvas, candidate, subset): - stickwidth = 4 - limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \ - [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ - [1, 16], [16, 18], [3, 17], [6, 18]] - - colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \ - [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \ - [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] - for i in range(18): - for n in range(len(subset)): - index = int(subset[n][i]) - if index == -1: - continue - x, y = candidate[index][0:2] - cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1) - for i in range(17): - for n in range(len(subset)): - index = subset[n][np.array(limbSeq[i]) - 1] - if -1 in index: - continue - cur_canvas = canvas.copy() - Y = candidate[index.astype(int), 0] - X = candidate[index.astype(int), 1] - mX = np.mean(X) - mY = np.mean(Y) - length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5 - angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) - polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) - cv2.fillConvexPoly(cur_canvas, polygon, colors[i]) - canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0) - # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]]) - # plt.imshow(canvas[:, :, [2, 1, 0]]) - return canvas - - -# image drawed by opencv is not good. -def draw_handpose(canvas, all_hand_peaks, show_number=False): - edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \ - [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] - - for peaks in all_hand_peaks: - for ie, e in enumerate(edges): - if np.sum(np.all(peaks[e], axis=1) == 0) == 0: - x1, y1 = peaks[e[0]] - x2, y2 = peaks[e[1]] - cv2.line( - canvas, (x1, y1), (x2, y2), - matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, - thickness=2) - - for i, keyponit in enumerate(peaks): - x, y = keyponit - cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) - if show_number: - cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA) - return canvas - - -# detect hand according to body pose keypoints -# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp -def handDetect(candidate, subset, oriImg): - # right hand: wrist 4, elbow 3, shoulder 2 - # left hand: wrist 7, elbow 6, shoulder 5 - ratioWristElbow = 0.33 - detect_result = [] - image_height, image_width = oriImg.shape[0:2] - for person in subset.astype(int): - # if any of three not detected - has_left = np.sum(person[[5, 6, 7]] == -1) == 0 - has_right = np.sum(person[[2, 3, 4]] == -1) == 0 - if not (has_left or has_right): - continue - hands = [] - #left hand - if has_left: - left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]] - x1, y1 = candidate[left_shoulder_index][:2] - x2, y2 = candidate[left_elbow_index][:2] - x3, y3 = candidate[left_wrist_index][:2] - hands.append([x1, y1, x2, y2, x3, y3, True]) - # right hand - if has_right: - right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]] - x1, y1 = candidate[right_shoulder_index][:2] - x2, y2 = candidate[right_elbow_index][:2] - x3, y3 = candidate[right_wrist_index][:2] - hands.append([x1, y1, x2, y2, x3, y3, False]) - - for x1, y1, x2, y2, x3, y3, is_left 
- # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbow) = (1 + ratio) * pos_wrist - ratio * pos_elbow
- # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
- # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
- # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
- # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
- # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
- x = x3 + ratioWristElbow * (x3 - x2)
- y = y3 + ratioWristElbow * (y3 - y2)
- distanceWristElbow = math.sqrt((x3 - x2)**2 + (y3 - y2)**2)
- distanceElbowShoulder = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
- width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
- # x-y refers to the center --> offset to topLeft point
- # handRectangle.x -= handRectangle.width / 2.f;
- # handRectangle.y -= handRectangle.height / 2.f;
- x -= width / 2
- y -= width / 2 # width = height
- # clip the box so it does not overflow the image
- if x < 0: x = 0
- if y < 0: y = 0
- width1 = width
- width2 = width
- if x + width > image_width: width1 = image_width - x
- if y + width > image_height: width2 = image_height - y
- width = min(width1, width2)
- # keep only hand boxes that are at least 20 pixels wide
- if width >= 20:
- detect_result.append([int(x), int(y), int(width), is_left])
- '''
- return value: [[x, y, w, True if left hand else False]].
- width = height since the network requires square input.
- x, y are the coordinates of the top-left corner.
- '''
- return detect_result
-
-
-# get the max index of a 2d array
-def npmax(array):
- arrayindex = array.argmax(1)
- arrayvalue = array.max(1)
- i = arrayvalue.argmax()
- j = arrayindex[i]
- return i, j
-
-
-def HWC3(x):
- assert x.dtype == np.uint8
- if x.ndim == 2:
- x = x[:, :, None]
- assert x.ndim == 3
- H, W, C = x.shape
- assert C == 1 or C == 3 or C == 4
- if C == 3:
- return x
- if C == 1:
- return np.concatenate([x, x, x], axis=2)
- if C == 4:
- color = x[:, :, 0:3].astype(np.float32)
- alpha = x[:, :, 3:4].astype(np.float32) / 255.0
- y = color * alpha + 255.0 * (1.0 - alpha)
- y = y.clip(0, 255).astype(np.uint8)
- return y
-
-
-def resize_image(input_image, resolution):
- H, W, C = input_image.shape
- H = float(H)
- W = float(W)
- k = float(resolution) / min(H, W)
- H *= k
- W *= k
- H = int(np.round(H / 64.0)) * 64
- W = int(np.round(W / 64.0)) * 64
- img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
- return img
diff --git a/spaces/Adapter/T2I-Adapter/docs/examples.md b/spaces/Adapter/T2I-Adapter/docs/examples.md
deleted file mode 100644
index 4e422ee622b7a6e2042776df3944b255368cdb49..0000000000000000000000000000000000000000
--- a/spaces/Adapter/T2I-Adapter/docs/examples.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# Demos
-
-## Style Adapter
-

-*(example images removed)*
-
-## Color Adapter (Spatial Palette)
-
-*(example images removed)*
-
-## Openpose Adapter
-
-*(example images removed)*
-
-## Canny Adapter (Edge)
-
-*(example images removed)*
-
-## Multi-adapters
-
-*(example images removed)*
-
-*T2I adapters naturally support using multiple adapters together.*
-
-*(example images removed)*
-
-The testing script for this example uses a command similar to the one below, except that the pretrained SD model is replaced with Anything 4.5 and Kenshi.
-
->python test_composable_adapters.py --prompt "1girl, computer desk, best quality, extremely detailed" --neg_prompt "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality" --depth_cond_path examples/depth/desk_depth.png --depth_cond_weight 1.0 --depth_ckpt models/t2iadapter_depth_sd14v1.pth --depth_type_in depth --pose_cond_path examples/keypose/person_keypose.png --pose_cond_weight 1.5 --ckpt models/anything-v4.0-pruned.ckpt --n_sample 4 --max_resolution 524288
-
-[Image source](https://twitter.com/toyxyz3/status/1628375164781211648)
diff --git a/spaces/Adr740/CV_XPLORER_POC/app.py b/spaces/Adr740/CV_XPLORER_POC/app.py
deleted file mode 100644
index 9be2868d08a29a2e2b2c9e5d1693939a33b34174..0000000000000000000000000000000000000000
--- a/spaces/Adr740/CV_XPLORER_POC/app.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import gradio as gr
-from functools import partial
-import os
-from get_cv import get_cv
-
-title = "CV Indexing par Intelligence Artificielle"
-desc = "Ceci est un outil qui vous aide à trouver rapidement des CV pertinents en fonction des descriptions de poste. Il suffit de taper ce que vous recherchez dans la zone ci-dessous.\n\n Avec l'aide de l'IA, cet outil est conçu pour simplifier votre recherche de CV en suggérant des résultats qui correspondent le mieux à vos besoins. Vous n'avez qu'à saisir les termes pertinents qui décrivent le poste que vous recherchez et l'outil vous présentera une liste de CV adaptés à vos critères. Cela vous permettra de gagner du temps et de trouver plus facilement les candidats idéaux pour votre entreprise.\n\n"
-# warning = "Warning!"
-disclaimer = "#### Attention! La méthode utilisée est stochastique et par conséquent les résultats peuvent parfois ne pas respecter parfaitement la requête. SI CELA ARRIVE : essayez d'adapter votre demande en reformulant ou en fournissant plus d'informations, cela fonctionne mieux avec des textes plus longs (fiche de poste par exemple)"
-def iter_grid(n_rows, n_cols):
- for _ in range(n_rows):
- with gr.Row():
- for _ in range(n_cols):
- with gr.Column():
- yield
-with gr.Blocks(title=title) as demo:
- gr.Markdown(f"## {title}")
- gr.Markdown(desc)
- gr.Markdown(disclaimer)
- with gr.Row():
- with gr.Column(scale=4):
- text_area = gr.Textbox(placeholder="Écrivez ici", lines=3, label="Décrivez le type de candidat que vous cherchez ou copiez-collez une fiche de poste")
- with gr.Column(scale=1):
- number_to_display = gr.Number(value=10, label="Nombre de candidats à afficher")
- submit_button = gr.Button(value="Rechercher des candidats")
- pass
-
- fn = partial(get_cv)
-
- with gr.Accordion("Tous les résultats:"):
- ll = gr.Markdown("Vide")
-
-
- submit_button.click(fn=fn, inputs=[text_area, number_to_display], outputs=[ll])
-
-login = os.environ.get("login")
-pwd = os.environ.get("pwd")
-demo.launch(enable_queue=True, max_threads=40)
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/kawaseblurpipeline-plugin.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/kawaseblurpipeline-plugin.d.ts
deleted file mode 100644
index dde746c865a482fb1dc8f71310aab1fc136e7ab9..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/kawaseblurpipeline-plugin.d.ts
+++ /dev/null
@@ -1,30 +0,0 @@
-// import * as Phaser from 'phaser';
-import KawaseBlurFilterPostFxPipeline from './kawaseblurpipeline';
-
-
-export default KawaseBlurFilterPipelinePlugin;
-
-declare namespace KawaseBlurFilterPipelinePlugin {
-
- interface IConfig extends KawaseBlurFilterPostFxPipeline.IConfig {
- name?: string,
- }
-
-}
-
-declare class KawaseBlurFilterPipelinePlugin extends Phaser.Plugins.BasePlugin {
- add(
- gameObject: Phaser.GameObjects.GameObject | Phaser.Cameras.Scene2D.Camera,
- config?: KawaseBlurFilterPipelinePlugin.IConfig
- ): KawaseBlurFilterPostFxPipeline;
-
- remove(
- gameObject: Phaser.GameObjects.GameObject,
- name?: string
- ): this;
-
- get(
- gameObject: Phaser.GameObjects.GameObject,
- name?: string
- ): KawaseBlurFilterPostFxPipeline | KawaseBlurFilterPostFxPipeline[];
-}
\ No newline at end of file
diff --git a/spaces/AiiluoChen/webui/README.md b/spaces/AiiluoChen/webui/README.md
deleted file mode 100644
index 013d12c9f3a56698056ae1bdbbfb0ec009805237..0000000000000000000000000000000000000000
--- a/spaces/AiiluoChen/webui/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Stable Diffusion Web UI
-emoji: 🚧
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.9
-app_file: app.py
-pinned: false
-duplicated_from: camenduru/webui
----
-
-## Stable Diffusion Web UI
-[https://github.com/AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
-
-## Documentation
-[https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki)
-
-## Models License
-https://huggingface.co/spaces/CompVis/stable-diffusion-license
\ No newline at end of file
diff --git a/spaces/AlekseyCalvin/dreambooth-training3/convertosd.py b/spaces/AlekseyCalvin/dreambooth-training3/convertosd.py
deleted file mode 100644
index 1211d34edf018b7c402a765c5a7ecdb684cc28e3..0000000000000000000000000000000000000000
--- a/spaces/AlekseyCalvin/dreambooth-training3/convertosd.py
+++ /dev/null
@@ -1,302 
+0,0 @@ -# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint. -# *Only* converts the UNet, VAE, and Text Encoder. -# Does not convert optimizer state or any other thing. - -import argparse -import os.path as osp -import re - -import torch -import gc - -# =================# -# UNet Conversion # -# =================# - -unet_conversion_map = [ - # (stable-diffusion, HF Diffusers) - ("time_embed.0.weight", "time_embedding.linear_1.weight"), - ("time_embed.0.bias", "time_embedding.linear_1.bias"), - ("time_embed.2.weight", "time_embedding.linear_2.weight"), - ("time_embed.2.bias", "time_embedding.linear_2.bias"), - ("input_blocks.0.0.weight", "conv_in.weight"), - ("input_blocks.0.0.bias", "conv_in.bias"), - ("out.0.weight", "conv_norm_out.weight"), - ("out.0.bias", "conv_norm_out.bias"), - ("out.2.weight", "conv_out.weight"), - ("out.2.bias", "conv_out.bias"), -] - -unet_conversion_map_resnet = [ - # (stable-diffusion, HF Diffusers) - ("in_layers.0", "norm1"), - ("in_layers.2", "conv1"), - ("out_layers.0", "norm2"), - ("out_layers.3", "conv2"), - ("emb_layers.1", "time_emb_proj"), - ("skip_connection", "conv_shortcut"), -] - -unet_conversion_map_layer = [] -# hardcoded number of downblocks and resnets/attentions... -# would need smarter logic for other networks. -for i in range(4): - # loop over downblocks/upblocks - - for j in range(2): - # loop over resnets/attentions for downblocks - hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}." - sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0." - unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) - - if i < 3: - # no attention layers in down_blocks.3 - hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}." - sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1." - unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) - - for j in range(3): - # loop over resnets/attentions for upblocks - hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}." - sd_up_res_prefix = f"output_blocks.{3*i + j}.0." - unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) - - if i > 0: - # no attention layers in up_blocks.0 - hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}." - sd_up_atn_prefix = f"output_blocks.{3*i + j}.1." - unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) - - if i < 3: - # no downsample in down_blocks.3 - hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv." - sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op." - unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) - - # no upsample in up_blocks.3 - hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." - sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}." - unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) - -hf_mid_atn_prefix = "mid_block.attentions.0." -sd_mid_atn_prefix = "middle_block.1." -unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) - -for j in range(2): - hf_mid_res_prefix = f"mid_block.resnets.{j}." - sd_mid_res_prefix = f"middle_block.{2*j}." - unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) - - -def convert_unet_state_dict(unet_state_dict): - # buyer beware: this is a *brittle* function, - # and correct output requires that all of these pieces interact in - # the exact order in which I have arranged them. 
- mapping = {k: k for k in unet_state_dict.keys()} - for sd_name, hf_name in unet_conversion_map: - mapping[hf_name] = sd_name - for k, v in mapping.items(): - if "resnets" in k: - for sd_part, hf_part in unet_conversion_map_resnet: - v = v.replace(hf_part, sd_part) - mapping[k] = v - for k, v in mapping.items(): - for sd_part, hf_part in unet_conversion_map_layer: - v = v.replace(hf_part, sd_part) - mapping[k] = v - new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()} - return new_state_dict - - -# ================# -# VAE Conversion # -# ================# - -vae_conversion_map = [ - # (stable-diffusion, HF Diffusers) - ("nin_shortcut", "conv_shortcut"), - ("norm_out", "conv_norm_out"), - ("mid.attn_1.", "mid_block.attentions.0."), -] - -for i in range(4): - # down_blocks have two resnets - for j in range(2): - hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}." - sd_down_prefix = f"encoder.down.{i}.block.{j}." - vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) - - if i < 3: - hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0." - sd_downsample_prefix = f"down.{i}.downsample." - vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) - - hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." - sd_upsample_prefix = f"up.{3-i}.upsample." - vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) - - # up_blocks have three resnets - # also, up blocks in hf are numbered in reverse from sd - for j in range(3): - hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}." - sd_up_prefix = f"decoder.up.{3-i}.block.{j}." - vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) - -# this part accounts for mid blocks in both the encoder and the decoder -for i in range(2): - hf_mid_res_prefix = f"mid_block.resnets.{i}." - sd_mid_res_prefix = f"mid.block_{i+1}." 
- vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) - - -vae_conversion_map_attn = [ - # (stable-diffusion, HF Diffusers) - ("norm.", "group_norm."), - ("q.", "query."), - ("k.", "key."), - ("v.", "value."), - ("proj_out.", "proj_attn."), -] - - -def reshape_weight_for_sd(w): - # convert HF linear weights to SD conv2d weights - return w.reshape(*w.shape, 1, 1) - - -def convert_vae_state_dict(vae_state_dict): - mapping = {k: k for k in vae_state_dict.keys()} - for k, v in mapping.items(): - for sd_part, hf_part in vae_conversion_map: - v = v.replace(hf_part, sd_part) - mapping[k] = v - for k, v in mapping.items(): - if "attentions" in k: - for sd_part, hf_part in vae_conversion_map_attn: - v = v.replace(hf_part, sd_part) - mapping[k] = v - new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()} - weights_to_convert = ["q", "k", "v", "proj_out"] - print("Converting to CKPT ...") - for k, v in new_state_dict.items(): - for weight_name in weights_to_convert: - if f"mid.attn_1.{weight_name}.weight" in k: - print(f"Reshaping {k} for SD format") - new_state_dict[k] = reshape_weight_for_sd(v) - return new_state_dict - - -# =========================# -# Text Encoder Conversion # -# =========================# - - -textenc_conversion_lst = [ - # (stable-diffusion, HF Diffusers) - ("resblocks.", "text_model.encoder.layers."), - ("ln_1", "layer_norm1"), - ("ln_2", "layer_norm2"), - (".c_fc.", ".fc1."), - (".c_proj.", ".fc2."), - (".attn", ".self_attn"), - ("ln_final.", "transformer.text_model.final_layer_norm."), - ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), - ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), -] -protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} -textenc_pattern = re.compile("|".join(protected.keys())) - -# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp -code2idx = {"q": 0, "k": 1, "v": 2} - - -def convert_text_enc_state_dict_v20(text_enc_dict): - new_state_dict = {} - capture_qkv_weight = {} - capture_qkv_bias = {} - for k, v in text_enc_dict.items(): - if ( - k.endswith(".self_attn.q_proj.weight") - or k.endswith(".self_attn.k_proj.weight") - or k.endswith(".self_attn.v_proj.weight") - ): - k_pre = k[: -len(".q_proj.weight")] - k_code = k[-len("q_proj.weight")] - if k_pre not in capture_qkv_weight: - capture_qkv_weight[k_pre] = [None, None, None] - capture_qkv_weight[k_pre][code2idx[k_code]] = v - continue - - if ( - k.endswith(".self_attn.q_proj.bias") - or k.endswith(".self_attn.k_proj.bias") - or k.endswith(".self_attn.v_proj.bias") - ): - k_pre = k[: -len(".q_proj.bias")] - k_code = k[-len("q_proj.bias")] - if k_pre not in capture_qkv_bias: - capture_qkv_bias[k_pre] = [None, None, None] - capture_qkv_bias[k_pre][code2idx[k_code]] = v - continue - - relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k) - new_state_dict[relabelled_key] = v - - for k_pre, tensors in capture_qkv_weight.items(): - if None in tensors: - raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") - relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre) - new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors) - - for k_pre, tensors in capture_qkv_bias.items(): - if None in tensors: - raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") - relabelled_key = textenc_pattern.sub(lambda m: 
protected[re.escape(m.group(0))], k_pre) - new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors) - - return new_state_dict - - -def convert_text_enc_state_dict(text_enc_dict): - return text_enc_dict - - -def convert(model_path, checkpoint_path): - unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin") - vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin") - text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin") - - # Convert the UNet model - unet_state_dict = torch.load(unet_path, map_location="cpu") - unet_state_dict = convert_unet_state_dict(unet_state_dict) - unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} - - # Convert the VAE model - vae_state_dict = torch.load(vae_path, map_location="cpu") - vae_state_dict = convert_vae_state_dict(vae_state_dict) - vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} - - # Convert the text encoder model - text_enc_dict = torch.load(text_enc_path, map_location="cpu") - - # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper - is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict - - if is_v20_model: - # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm - text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()} - text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict) - text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()} - else: - text_enc_dict = convert_text_enc_state_dict(text_enc_dict) - text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()} - - # Put together new checkpoint - state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict} - state_dict = {k: v.half() for k, v in state_dict.items()} - state_dict = {"state_dict": state_dict} - torch.save(state_dict, checkpoint_path) - del state_dict, text_enc_dict, vae_state_dict, unet_state_dict - torch.cuda.empty_cache() - gc.collect() - \ No newline at end of file diff --git a/spaces/Alfasign/HuggingGPT-Lite/models_server.py b/spaces/Alfasign/HuggingGPT-Lite/models_server.py deleted file mode 100644 index 022abc3986adfb76e0f03478a8f9455e96a7cd19..0000000000000000000000000000000000000000 --- a/spaces/Alfasign/HuggingGPT-Lite/models_server.py +++ /dev/null @@ -1,779 +0,0 @@ -import argparse -import logging -import random -import uuid -import numpy as np -from transformers import pipeline -from diffusers import ( - DiffusionPipeline, - StableDiffusionControlNetPipeline, - ControlNetModel, - UniPCMultistepScheduler, -) -from diffusers.utils import load_image -from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler -from diffusers.utils import export_to_video -from transformers import BlipProcessor, BlipForConditionalGeneration -from transformers import ( - TrOCRProcessor, - VisionEncoderDecoderModel, - ViTImageProcessor, - AutoTokenizer, -) -from datasets import load_dataset -from PIL import Image -import io -from torchvision import transforms -import torch -import torchaudio -from speechbrain.pretrained import WaveformEnhancement -import joblib -from huggingface_hub import hf_hub_url, cached_download -from transformers import AutoImageProcessor, TimesformerForVideoClassification -from transformers import ( - MaskFormerFeatureExtractor, - MaskFormerForInstanceSegmentation, - AutoFeatureExtractor, -) -from controlnet_aux import ( - OpenposeDetector, - 
MLSDdetector, - HEDdetector, - CannyDetector, - MidasDetector, -) -from controlnet_aux.open_pose.body import Body -from controlnet_aux.mlsd.models.mbv2_mlsd_large import MobileV2_MLSD_Large -from controlnet_aux.hed import Network -from transformers import DPTForDepthEstimation, DPTFeatureExtractor -import warnings -import time -from espnet2.bin.tts_inference import Text2Speech -import soundfile as sf -from asteroid.models import BaseModel -import traceback -import os -import yaml - -warnings.filterwarnings("ignore") - -parser = argparse.ArgumentParser() -parser.add_argument("--config", type=str, default="config.yaml") -args = parser.parse_args() - -if __name__ != "__main__": - args.config = "config.gradio.yaml" - -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -handler = logging.StreamHandler() -handler.setLevel(logging.INFO) -formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") -handler.setFormatter(formatter) -logger.addHandler(handler) - -config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader) - -local_deployment = config["local_deployment"] -if config["inference_mode"] == "huggingface": - local_deployment = "none" - -PROXY = None -if config["proxy"]: - PROXY = { - "https": config["proxy"], - } - -start = time.time() - -# local_models = "models/" -local_models = "" - - -def load_pipes(local_deployment): - other_pipes = {} - standard_pipes = {} - controlnet_sd_pipes = {} - if local_deployment in ["full"]: - other_pipes = { - # "Salesforce/blip-image-captioning-large": { - # "model": BlipForConditionalGeneration.from_pretrained(f"Salesforce/blip-image-captioning-large"), - # "processor": BlipProcessor.from_pretrained(f"Salesforce/blip-image-captioning-large"), - # "device": "cpu" - # }, - # "damo-vilab/text-to-video-ms-1.7b": { - # "model": DiffusionPipeline.from_pretrained( - # f"{local_models}damo-vilab/text-to-video-ms-1.7b", - # torch_dtype=torch.float16, - # variant="fp16", - # ), - # "device": "cpu", - # }, - # "facebook/maskformer-swin-large-ade": { - # "model": MaskFormerForInstanceSegmentation.from_pretrained(f"facebook/maskformer-swin-large-ade"), - # "feature_extractor" : AutoFeatureExtractor.from_pretrained("facebook/maskformer-swin-large-ade"), - # "device": "cpu" - # }, - # "microsoft/trocr-base-printed": { - # "processor": TrOCRProcessor.from_pretrained(f"microsoft/trocr-base-printed"), - # "model": VisionEncoderDecoderModel.from_pretrained(f"microsoft/trocr-base-printed"), - # "device": "cpu" - # }, - # "microsoft/trocr-base-handwritten": { - # "processor": TrOCRProcessor.from_pretrained(f"microsoft/trocr-base-handwritten"), - # "model": VisionEncoderDecoderModel.from_pretrained(f"microsoft/trocr-base-handwritten"), - # "device": "cpu" - # }, - # "JorisCos/DCCRNet_Libri1Mix_enhsingle_16k": { - # "model": BaseModel.from_pretrained( - # "JorisCos/DCCRNet_Libri1Mix_enhsingle_16k" - # ), - # "device": "cpu", - # }, - # "CompVis/stable-diffusion-v1-4": { - # "model": DiffusionPipeline.from_pretrained(f"CompVis/stable-diffusion-v1-4"), - # "device": "cpu" - # }, - # "stabilityai/stable-diffusion-2-1": { - # "model": DiffusionPipeline.from_pretrained(f"stabilityai/stable-diffusion-2-1"), - # "device": "cpu" - # }, - # "microsoft/speecht5_tts":{ - # "processor": SpeechT5Processor.from_pretrained(f"microsoft/speecht5_tts"), - # "model": SpeechT5ForTextToSpeech.from_pretrained(f"microsoft/speecht5_tts"), - # "vocoder": SpeechT5HifiGan.from_pretrained(f"microsoft/speecht5_hifigan"), - # "embeddings_dataset": 
load_dataset(f"Matthijs/cmu-arctic-xvectors", split="validation"), - # "device": "cpu" - # }, - # "speechbrain/mtl-mimic-voicebank": { - # "model": WaveformEnhancement.from_hparams(source="speechbrain/mtl-mimic-voicebank", savedir="models/mtl-mimic-voicebank"), - # "device": "cpu" - # }, - # "microsoft/speecht5_vc": { - # "processor": SpeechT5Processor.from_pretrained( - # f"{local_models}microsoft/speecht5_vc" - # ), - # "model": SpeechT5ForSpeechToSpeech.from_pretrained( - # f"{local_models}microsoft/speecht5_vc" - # ), - # "vocoder": SpeechT5HifiGan.from_pretrained( - # f"{local_models}microsoft/speecht5_hifigan" - # ), - # "embeddings_dataset": load_dataset( - # f"{local_models}Matthijs/cmu-arctic-xvectors", split="validation" - # ), - # "device": "cpu", - # }, - # "julien-c/wine-quality": { - # "model": joblib.load(cached_download(hf_hub_url("julien-c/wine-quality", "sklearn_model.joblib"))) - # }, - # "facebook/timesformer-base-finetuned-k400": { - # "processor": AutoImageProcessor.from_pretrained(f"facebook/timesformer-base-finetuned-k400"), - # "model": TimesformerForVideoClassification.from_pretrained(f"facebook/timesformer-base-finetuned-k400"), - # "device": "cpu" - # }, - "facebook/maskformer-swin-base-coco": { - "feature_extractor": MaskFormerFeatureExtractor.from_pretrained( - f"{local_models}facebook/maskformer-swin-base-coco" - ), - "model": MaskFormerForInstanceSegmentation.from_pretrained( - f"{local_models}facebook/maskformer-swin-base-coco" - ), - "device": "cpu", - }, - # "Intel/dpt-hybrid-midas": { - # "model": DPTForDepthEstimation.from_pretrained( - # f"{local_models}Intel/dpt-hybrid-midas", low_cpu_mem_usage=True - # ), - # "feature_extractor": DPTFeatureExtractor.from_pretrained( - # f"{local_models}Intel/dpt-hybrid-midas" - # ), - # "device": "cpu", - # }, - } - - if local_deployment in ["full", "standard"]: - standard_pipes = { - # "nlpconnect/vit-gpt2-image-captioning":{ - # "model": VisionEncoderDecoderModel.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"), - # "feature_extractor": ViTImageProcessor.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"), - # "tokenizer": AutoTokenizer.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"), - # "device": "cpu" - # }, - # "espnet/kan-bayashi_ljspeech_vits": { - # "model": Text2Speech.from_pretrained( - # "espnet/kan-bayashi_ljspeech_vits" - # ), - # "device": "cpu", - # }, - # "lambdalabs/sd-image-variations-diffusers": { - # "model": DiffusionPipeline.from_pretrained(f"{local_models}lambdalabs/sd-image-variations-diffusers"), #torch_dtype=torch.float16 - # "device": "cpu" - # }, - # "runwayml/stable-diffusion-v1-5": { - # "model": DiffusionPipeline.from_pretrained( - # f"{local_models}runwayml/stable-diffusion-v1-5" - # ), - # "device": "cpu", - # }, - # "superb/wav2vec2-base-superb-ks": { - # "model": pipeline(task="audio-classification", model=f"superb/wav2vec2-base-superb-ks"), - # "device": "cpu" - # }, - # "openai/whisper-base": { - # "model": pipeline( - # task="automatic-speech-recognition", - # model=f"{local_models}openai/whisper-base", - # ), - # "device": "cpu", - # }, - # "microsoft/speecht5_asr": { - # "model": pipeline(task="automatic-speech-recognition", model=f"{local_models}microsoft/speecht5_asr"), - # "device": "cpu" - # }, - "Intel/dpt-large": { - "model": pipeline( - task="depth-estimation", model=f"{local_models}Intel/dpt-large" - ), - "device": "cpu", - }, - # "microsoft/beit-base-patch16-224-pt22k-ft22k": { - # "model": 
pipeline(task="image-classification", model=f"microsoft/beit-base-patch16-224-pt22k-ft22k"), - # "device": "cpu" - # }, - "facebook/detr-resnet-50-panoptic": { - "model": pipeline( - task="image-segmentation", - model=f"{local_models}facebook/detr-resnet-50-panoptic", - ), - "device": "cpu", - }, - "facebook/detr-resnet-101": { - "model": pipeline( - task="object-detection", - model=f"{local_models}facebook/detr-resnet-101", - ), - "device": "cpu", - }, - # "openai/clip-vit-large-patch14": { - # "model": pipeline(task="zero-shot-image-classification", model=f"openai/clip-vit-large-patch14"), - # "device": "cpu" - # }, - # "google/owlvit-base-patch32": { - # "model": pipeline(task="zero-shot-object-detection", model=f"{local_models}google/owlvit-base-patch32"), - # "device": "cpu" - # }, - # "microsoft/DialoGPT-medium": { - # "model": pipeline(task="conversational", model=f"microsoft/DialoGPT-medium"), - # "device": "cpu" - # }, - # "bert-base-uncased": { - # "model": pipeline(task="fill-mask", model=f"bert-base-uncased"), - # "device": "cpu" - # }, - # "deepset/roberta-base-squad2": { - # "model": pipeline(task = "question-answering", model=f"deepset/roberta-base-squad2"), - # "device": "cpu" - # }, - # "facebook/bart-large-cnn": { - # "model": pipeline(task="summarization", model=f"facebook/bart-large-cnn"), - # "device": "cpu" - # }, - # "google/tapas-base-finetuned-wtq": { - # "model": pipeline(task="table-question-answering", model=f"google/tapas-base-finetuned-wtq"), - # "device": "cpu" - # }, - # "distilbert-base-uncased-finetuned-sst-2-english": { - # "model": pipeline(task="text-classification", model=f"distilbert-base-uncased-finetuned-sst-2-english"), - # "device": "cpu" - # }, - # "gpt2": { - # "model": pipeline(task="text-generation", model="gpt2"), - # "device": "cpu" - # }, - # "mrm8488/t5-base-finetuned-question-generation-ap": { - # "model": pipeline(task="text2text-generation", model=f"mrm8488/t5-base-finetuned-question-generation-ap"), - # "device": "cpu" - # }, - # "Jean-Baptiste/camembert-ner": { - # "model": pipeline(task="token-classification", model=f"Jean-Baptiste/camembert-ner", aggregation_strategy="simple"), - # "device": "cpu" - # }, - # "t5-base": { - # "model": pipeline(task="translation", model=f"t5-base"), - # "device": "cpu" - # }, - # "impira/layoutlm-document-qa": { - # "model": pipeline(task="document-question-answering", model=f"{local_models}impira/layoutlm-document-qa"), - # "device": "cpu" - # }, - "ydshieh/vit-gpt2-coco-en": { - "model": pipeline( - task="image-to-text", - model=f"{local_models}ydshieh/vit-gpt2-coco-en", - ), - "device": "cpu", - }, - # "dandelin/vilt-b32-finetuned-vqa": { - # "model": pipeline( - # task="visual-question-answering", - # model=f"{local_models}dandelin/vilt-b32-finetuned-vqa", - # ), - # "device": "cpu", - # }, - } - - if local_deployment in ["full", "standard", "minimal"]: - controlnet = ControlNetModel.from_pretrained( - f"{local_models}lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16 - ) - controlnetpipe = StableDiffusionControlNetPipeline.from_pretrained( - f"{local_models}runwayml/stable-diffusion-v1-5", - controlnet=controlnet, - torch_dtype=torch.float16, - ) - - hed_network = HEDdetector.from_pretrained("lllyasviel/ControlNet") - - pipes = {**standard_pipes, **other_pipes} - return pipes - - -pipes = load_pipes(local_deployment) - -end = time.time() -during = end - start - -print(f"[ ready ] {during}s") - - -def running(): - return {"running": True} - - -def status(model_id): - disabled_models = [ - 
"microsoft/trocr-base-printed", - "microsoft/trocr-base-handwritten", - ] - if model_id in pipes.keys() and model_id not in disabled_models: - print(f"[ check {model_id} ] success") - return {"loaded": True} - else: - print(f"[ check {model_id} ] failed") - return {"loaded": False} - - -def models(model_id, data): - while "using" in pipes[model_id] and pipes[model_id]["using"]: - print(f"[ inference {model_id} ] waiting") - time.sleep(0.1) - pipes[model_id]["using"] = True - print(f"[ inference {model_id} ] start") - - start = time.time() - - pipe = pipes[model_id]["model"] - - if "device" in pipes[model_id]: - try: - pipe.to(pipes[model_id]["device"]) - except: - pipe.device = torch.device(pipes[model_id]["device"]) - pipe.model.to(pipes[model_id]["device"]) - - result = None - try: - # text to video - if model_id == "damo-vilab/text-to-video-ms-1.7b": - pipe.scheduler = DPMSolverMultistepScheduler.from_config( - pipe.scheduler.config - ) - # pipe.enable_model_cpu_offload() - prompt = data["text"] - video_frames = pipe(prompt, num_inference_steps=50, num_frames=40).frames - file_name = str(uuid.uuid4())[:4] - video_path = export_to_video(video_frames, f"public/videos/{file_name}.mp4") - - new_file_name = str(uuid.uuid4())[:4] - os.system( - f"ffmpeg -i {video_path} -vcodec libx264 public/videos/{new_file_name}.mp4" - ) - - if os.path.exists(f"public/videos/{new_file_name}.mp4"): - result = {"path": f"/videos/{new_file_name}.mp4"} - else: - result = {"path": f"/videos/{file_name}.mp4"} - - # controlnet - if model_id.startswith("lllyasviel/sd-controlnet-"): - pipe.controlnet.to("cpu") - pipe.controlnet = pipes[model_id]["control"].to(pipes[model_id]["device"]) - pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) - control_image = load_image(data["img_url"]) - # generator = torch.manual_seed(66) - out_image: Image = pipe( - data["text"], num_inference_steps=20, image=control_image - ).images[0] - file_name = str(uuid.uuid4())[:4] - out_image.save(f"public/images/{file_name}.png") - result = {"path": f"/images/{file_name}.png"} - - if model_id.endswith("-control"): - image = load_image(data["img_url"]) - if "scribble" in model_id: - control = pipe(image, scribble=True) - elif "canny" in model_id: - control = pipe(image, low_threshold=100, high_threshold=200) - else: - control = pipe(image) - file_name = str(uuid.uuid4())[:4] - control.save(f"public/images/{file_name}.png") - result = {"path": f"/images/{file_name}.png"} - - # image to image - if model_id == "lambdalabs/sd-image-variations-diffusers": - im = load_image(data["img_url"]) - file_name = str(uuid.uuid4())[:4] - with open(f"public/images/{file_name}.png", "wb") as f: - f.write(data) - tform = transforms.Compose( - [ - transforms.ToTensor(), - transforms.Resize( - (224, 224), - interpolation=transforms.InterpolationMode.BICUBIC, - antialias=False, - ), - transforms.Normalize( - [0.48145466, 0.4578275, 0.40821073], - [0.26862954, 0.26130258, 0.27577711], - ), - ] - ) - inp = tform(im).to(pipes[model_id]["device"]).unsqueeze(0) - out = pipe(inp, guidance_scale=3) - out["images"][0].save(f"public/images/{file_name}.jpg") - result = {"path": f"/images/{file_name}.jpg"} - - # image to text - if model_id == "Salesforce/blip-image-captioning-large": - raw_image = load_image(data["img_url"]).convert("RGB") - text = data["text"] - inputs = pipes[model_id]["processor"](raw_image, return_tensors="pt").to( - pipes[model_id]["device"] - ) - out = pipe.generate(**inputs) - caption = pipes[model_id]["processor"].decode( - 
out[0], skip_special_tokens=True - ) - result = {"generated text": caption} - if model_id == "ydshieh/vit-gpt2-coco-en": - img_url = data["img_url"] - generated_text = pipe(img_url)[0]["generated_text"] - result = {"generated text": generated_text} - if model_id == "nlpconnect/vit-gpt2-image-captioning": - image = load_image(data["img_url"]).convert("RGB") - pixel_values = pipes[model_id]["feature_extractor"]( - images=image, return_tensors="pt" - ).pixel_values - pixel_values = pixel_values.to(pipes[model_id]["device"]) - generated_ids = pipe.generate( - pixel_values, **{"max_length": 200, "num_beams": 1} - ) - generated_text = pipes[model_id]["tokenizer"].batch_decode( - generated_ids, skip_special_tokens=True - )[0] - result = {"generated text": generated_text} - # image to text: OCR - if ( - model_id == "microsoft/trocr-base-printed" - or model_id == "microsoft/trocr-base-handwritten" - ): - image = load_image(data["img_url"]).convert("RGB") - pixel_values = pipes[model_id]["processor"]( - image, return_tensors="pt" - ).pixel_values - pixel_values = pixel_values.to(pipes[model_id]["device"]) - generated_ids = pipe.generate(pixel_values) - generated_text = pipes[model_id]["processor"].batch_decode( - generated_ids, skip_special_tokens=True - )[0] - result = {"generated text": generated_text} - - # text to image - if model_id == "runwayml/stable-diffusion-v1-5": - file_name = str(uuid.uuid4())[:4] - text = data["text"] - out = pipe(prompt=text) - out["images"][0].save(f"public/images/{file_name}.jpg") - result = {"path": f"/images/{file_name}.jpg"} - - # object detection - if ( - model_id == "google/owlvit-base-patch32" - or model_id == "facebook/detr-resnet-101" - ): - img_url = data["img_url"] - open_types = [ - "cat", - "couch", - "person", - "car", - "dog", - "horse", - "sheep", - "cow", - "elephant", - "bear", - "zebra", - "giraffe", - "backpack", - "umbrella", - "handbag", - "tie", - "suitcase", - "frisbee", - "skis", - "snowboard", - "sports ball", - "kite", - "baseball bat", - "baseball glove", - "skateboard", - "surfboard", - "tennis racket", - "bottle", - "wine glass", - "cup", - "fork", - "knife", - "spoon", - "bowl", - "banana", - "apple", - "sandwich", - "orange", - "broccoli", - "carrot", - "hot dog", - "pizza", - "donut", - "cake", - "chair", - "couch", - "potted plant", - "bed", - "dining table", - "toilet", - "tv", - "laptop", - "mouse", - "remote", - "keyboard", - "cell phone", - "microwave", - "oven", - "toaster", - "sink", - "refrigerator", - "book", - "clock", - "vase", - "scissors", - "teddy bear", - "hair drier", - "toothbrush", - "traffic light", - "fire hydrant", - "stop sign", - "parking meter", - "bench", - "bird", - ] - result = pipe(img_url, candidate_labels=open_types) - - # VQA - if model_id == "dandelin/vilt-b32-finetuned-vqa": - question = data["text"] - img_url = data["img_url"] - result = pipe(question=question, image=img_url) - - # DQA - if model_id == "impira/layoutlm-document-qa": - question = data["text"] - img_url = data["img_url"] - result = pipe(img_url, question) - - # depth-estimation - if model_id == "Intel/dpt-large": - output = pipe(data["img_url"]) - image = output["depth"] - name = str(uuid.uuid4())[:4] - image.save(f"public/images/{name}.jpg") - result = {"path": f"/images/{name}.jpg"} - - if model_id == "Intel/dpt-hybrid-midas" and model_id == "Intel/dpt-large": - image = load_image(data["img_url"]) - inputs = pipes[model_id]["feature_extractor"]( - images=image, return_tensors="pt" - ) - with torch.no_grad(): - outputs = pipe(**inputs) 
- predicted_depth = outputs.predicted_depth - prediction = torch.nn.functional.interpolate( - predicted_depth.unsqueeze(1), - size=image.size[::-1], - mode="bicubic", - align_corners=False, - ) - output = prediction.squeeze().cpu().numpy() - formatted = (output * 255 / np.max(output)).astype("uint8") - image = Image.fromarray(formatted) - name = str(uuid.uuid4())[:4] - image.save(f"public/images/{name}.jpg") - result = {"path": f"/images/{name}.jpg"} - - # TTS - if model_id == "espnet/kan-bayashi_ljspeech_vits": - text = data["text"] - wav = pipe(text)["wav"] - name = str(uuid.uuid4())[:4] - sf.write(f"public/audios/{name}.wav", wav.cpu().numpy(), pipe.fs, "PCM_16") - result = {"path": f"/audios/{name}.wav"} - - if model_id == "microsoft/speecht5_tts": - text = data["text"] - inputs = pipes[model_id]["processor"](text=text, return_tensors="pt") - embeddings_dataset = pipes[model_id]["embeddings_dataset"] - speaker_embeddings = ( - torch.tensor(embeddings_dataset[7306]["xvector"]) - .unsqueeze(0) - .to(pipes[model_id]["device"]) - ) - pipes[model_id]["vocoder"].to(pipes[model_id]["device"]) - speech = pipe.generate_speech( - inputs["input_ids"].to(pipes[model_id]["device"]), - speaker_embeddings, - vocoder=pipes[model_id]["vocoder"], - ) - name = str(uuid.uuid4())[:4] - sf.write( - f"public/audios/{name}.wav", speech.cpu().numpy(), samplerate=16000 - ) - result = {"path": f"/audios/{name}.wav"} - - # ASR - if model_id == "openai/whisper-base" or model_id == "microsoft/speecht5_asr": - audio_url = data["audio_url"] - result = {"text": pipe(audio_url)["text"]} - - # audio to audio - if model_id == "JorisCos/DCCRNet_Libri1Mix_enhsingle_16k": - audio_url = data["audio_url"] - wav, sr = torchaudio.load(audio_url) - with torch.no_grad(): - result_wav = pipe(wav.to(pipes[model_id]["device"])) - name = str(uuid.uuid4())[:4] - sf.write( - f"public/audios/{name}.wav", result_wav.cpu().squeeze().numpy(), sr - ) - result = {"path": f"/audios/{name}.wav"} - - if model_id == "microsoft/speecht5_vc": - audio_url = data["audio_url"] - wav, sr = torchaudio.load(audio_url) - inputs = pipes[model_id]["processor"]( - audio=wav, sampling_rate=sr, return_tensors="pt" - ) - embeddings_dataset = pipes[model_id]["embeddings_dataset"] - speaker_embeddings = torch.tensor( - embeddings_dataset[7306]["xvector"] - ).unsqueeze(0) - pipes[model_id]["vocoder"].to(pipes[model_id]["device"]) - speech = pipe.generate_speech( - inputs["input_ids"].to(pipes[model_id]["device"]), - speaker_embeddings, - vocoder=pipes[model_id]["vocoder"], - ) - name = str(uuid.uuid4())[:4] - sf.write( - f"public/audios/{name}.wav", speech.cpu().numpy(), samplerate=16000 - ) - result = {"path": f"/audios/{name}.wav"} - - # segmentation - if model_id == "facebook/detr-resnet-50-panoptic": - result = [] - segments = pipe(data["img_url"]) - image = load_image(data["img_url"]) - - colors = [] - for i in range(len(segments)): - colors.append( - ( - random.randint(100, 255), - random.randint(100, 255), - random.randint(100, 255), - 50, - ) - ) - - for segment in segments: - mask = segment["mask"] - mask = mask.convert("L") - layer = Image.new("RGBA", mask.size, colors[i]) - image.paste(layer, (0, 0), mask) - name = str(uuid.uuid4())[:4] - image.save(f"public/images/{name}.jpg") - result = {"path": f"/images/{name}.jpg"} - - if ( - model_id == "facebook/maskformer-swin-base-coco" - or model_id == "facebook/maskformer-swin-large-ade" - ): - image = load_image(data["img_url"]) - inputs = pipes[model_id]["feature_extractor"]( - images=image, 
return_tensors="pt" - ).to(pipes[model_id]["device"]) - outputs = pipe(**inputs) - result = pipes[model_id][ - "feature_extractor" - ].post_process_panoptic_segmentation( - outputs, target_sizes=[image.size[::-1]] - )[ - 0 - ] - predicted_panoptic_map = result["segmentation"].cpu().numpy() - predicted_panoptic_map = Image.fromarray( - predicted_panoptic_map.astype(np.uint8) - ) - name = str(uuid.uuid4())[:4] - predicted_panoptic_map.save(f"public/images/{name}.jpg") - result = {"path": f"/images/{name}.jpg"} - - except Exception as e: - print(e) - traceback.print_exc() - result = {"error": {"message": "Error when running the model inference."}} - - if "device" in pipes[model_id]: - try: - pipe.to("cpu") - # torch.cuda.empty_cache() - except: - pipe.device = torch.device("cpu") - pipe.model.to("cpu") - # torch.cuda.empty_cache() - - pipes[model_id]["using"] = False - - if result is None: - result = {"error": {"message": "model not found"}} - - end = time.time() - during = end - start - print(f"[ complete {model_id} ] {during}s") - print(f"[ result {model_id} ] {result}") - - return result diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/facial_recognition/__init__.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/facial_recognition/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/TRANSLATING.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/TRANSLATING.md deleted file mode 100644 index 32cd95f2ade9ba90ed6a10b1c54169b26a79d01d..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/TRANSLATING.md +++ /dev/null @@ -1,57 +0,0 @@ -### Translating the Diffusers documentation into your language - -As part of our mission to democratize machine learning, we'd love to make the Diffusers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language 🙏. - -**🗞️ Open an issue** - -To get started, navigate to the [Issues](https://github.com/huggingface/diffusers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "Translation template" from the "New issue" button. - -Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list. - - -**🍴 Fork the repository** - -First, you'll need to [fork the Diffusers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page. - -Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows: - -```bash -git clone https://github.com/YOUR-USERNAME/diffusers.git -``` - -**📋 Copy-paste the English version with a new language code** - -The documentation files are in one leading directory: - -- [`docs/source`](https://github.com/huggingface/diffusers/tree/main/docs/source): All the documentation materials are organized here by language. 
- -You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/diffusers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following: - -```bash -cd ~/path/to/diffusers/docs -cp -r source/en source/LANG-ID -``` - -Here, `LANG-ID` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table. - -**✍️ Start translating** - -The fun part comes - translating the text! - -The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website. - -> 🙋 If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/LANG-ID/` directory! - -The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml): - -```yaml -- sections: - - local: pipeline_tutorial # Do not change this! Use the same name for your .md file - title: Pipelines for inference # Translate this! - ... - title: Tutorials # Translate this! -``` - -Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter. - -> 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/diffusers/issues) and tag @patrickvonplaten. diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/utils/misc.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/utils/misc.py deleted file mode 100644 index 3e22c7b9085317b61a25c67d361f7e70df65bed1..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/utils/misc.py +++ /dev/null @@ -1,61 +0,0 @@ -from functools import partial - -import numpy as np -import torch -from six.moves import map, zip - -from ..mask.structures import BitmapMasks, PolygonMasks - - -def multi_apply(func, *args, **kwargs): - """Apply function to a list of arguments. - - Note: - This function applies the ``func`` to multiple inputs and - map the multiple outputs of the ``func`` into different - list. Each list contains the same type of outputs corresponding - to different inputs. - - Args: - func (Function): A function that will be applied to a list of - arguments - - Returns: - tuple(list): A tuple containing multiple list, each list contains \ - a kind of returned results by the function - """ - pfunc = partial(func, **kwargs) if kwargs else func - map_results = map(pfunc, *args) - return tuple(map(list, zip(*map_results))) - - -def unmap(data, count, inds, fill=0): - """Unmap a subset of item (data) back to the original set of items (of size - count)""" - if data.dim() == 1: - ret = data.new_full((count, ), fill) - ret[inds.type(torch.bool)] = data - else: - new_size = (count, ) + data.size()[1:] - ret = data.new_full(new_size, fill) - ret[inds.type(torch.bool), :] = data - return ret - - -def mask2ndarray(mask): - """Convert Mask to ndarray.. 
- - Args: - mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or - torch.Tensor or np.ndarray): The mask to be converted. - - Returns: - np.ndarray: Ndarray mask of shape (n, h, w) that has been converted - """ - if isinstance(mask, (BitmapMasks, PolygonMasks)): - mask = mask.to_ndarray() - elif isinstance(mask, torch.Tensor): - mask = mask.detach().cpu().numpy() - elif not isinstance(mask, np.ndarray): - raise TypeError(f'Unsupported {type(mask)} data type') - return mask diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/accuracy.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/accuracy.py deleted file mode 100644 index 789a2240a491289c5801b6690116e8ca657d004f..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/accuracy.py +++ /dev/null @@ -1,78 +0,0 @@ -import mmcv -import torch.nn as nn - - -@mmcv.jit(coderize=True) -def accuracy(pred, target, topk=1, thresh=None): - """Calculate accuracy according to the prediction and target. - - Args: - pred (torch.Tensor): The model prediction, shape (N, num_class) - target (torch.Tensor): The target of each prediction, shape (N, ) - topk (int | tuple[int], optional): If the predictions in ``topk`` - matches the target, the predictions will be regarded as - correct ones. Defaults to 1. - thresh (float, optional): If not None, predictions with scores under - this threshold are considered incorrect. Default to None. - - Returns: - float | tuple[float]: If the input ``topk`` is a single integer, - the function will return a single float as accuracy. If - ``topk`` is a tuple containing multiple integers, the - function will return a tuple containing accuracies of - each ``topk`` number. - """ - assert isinstance(topk, (int, tuple)) - if isinstance(topk, int): - topk = (topk, ) - return_single = True - else: - return_single = False - - maxk = max(topk) - if pred.size(0) == 0: - accu = [pred.new_tensor(0.) for i in range(len(topk))] - return accu[0] if return_single else accu - assert pred.ndim == 2 and target.ndim == 1 - assert pred.size(0) == target.size(0) - assert maxk <= pred.size(1), \ - f'maxk {maxk} exceeds pred dimension {pred.size(1)}' - pred_value, pred_label = pred.topk(maxk, dim=1) - pred_label = pred_label.t() # transpose to shape (maxk, N) - correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) - if thresh is not None: - # Only prediction values larger than thresh are counted as correct - correct = correct & (pred_value > thresh).t() - res = [] - for k in topk: - correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / pred.size(0))) - return res[0] if return_single else res - - -class Accuracy(nn.Module): - - def __init__(self, topk=(1, ), thresh=None): - """Module to calculate the accuracy. - - Args: - topk (tuple, optional): The criterion used to calculate the - accuracy. Defaults to (1,). - thresh (float, optional): If not None, predictions with scores - under this threshold are considered incorrect. Default to None. - """ - super().__init__() - self.topk = topk - self.thresh = thresh - - def forward(self, pred, target): - """Forward function to calculate accuracy. - - Args: - pred (torch.Tensor): Prediction of models. - target (torch.Tensor): Target for each prediction. - - Returns: - tuple[float]: The accuracies under different topk criterions. 
- """ - return accuracy(pred, target, self.topk, self.thresh) diff --git a/spaces/Andy1621/uniformer_image_detection/tools/deployment/pytorch2onnx.py b/spaces/Andy1621/uniformer_image_detection/tools/deployment/pytorch2onnx.py deleted file mode 100644 index 4d380207e26e2078851b3b6264177892c6d7ac04..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/tools/deployment/pytorch2onnx.py +++ /dev/null @@ -1,244 +0,0 @@ -import argparse -import os.path as osp -import warnings - -import numpy as np -import onnx -import onnxruntime as rt -import torch -from mmcv import DictAction - -from mmdet.core import (build_model_from_cfg, generate_inputs_and_wrap_model, - preprocess_example_input) - - -def pytorch2onnx(config_path, - checkpoint_path, - input_img, - input_shape, - opset_version=11, - show=False, - output_file='tmp.onnx', - verify=False, - normalize_cfg=None, - dataset='coco', - test_img=None, - do_simplify=False, - cfg_options=None): - - input_config = { - 'input_shape': input_shape, - 'input_path': input_img, - 'normalize_cfg': normalize_cfg - } - - # prepare original model and meta for verifying the onnx model - orig_model = build_model_from_cfg( - config_path, checkpoint_path, cfg_options=cfg_options) - one_img, one_meta = preprocess_example_input(input_config) - model, tensor_data = generate_inputs_and_wrap_model( - config_path, checkpoint_path, input_config, cfg_options=cfg_options) - output_names = ['boxes'] - if model.with_bbox: - output_names.append('labels') - if model.with_mask: - output_names.append('masks') - - torch.onnx.export( - model, - tensor_data, - output_file, - input_names=['input'], - output_names=output_names, - export_params=True, - keep_initializers_as_inputs=True, - do_constant_folding=True, - verbose=show, - opset_version=opset_version) - - model.forward = orig_model.forward - - # simplify onnx model - if do_simplify: - from mmdet import digit_version - import mmcv - - min_required_version = '1.2.5' - assert digit_version(mmcv.__version__) >= digit_version( - min_required_version - ), f'Requires to install mmcv>={min_required_version}' - from mmcv.onnx.simplify import simplify - - input_dic = {'input': one_img.detach().cpu().numpy()} - _ = simplify(output_file, [input_dic], output_file) - print(f'Successfully exported ONNX model: {output_file}') - if verify: - from mmdet.core import get_classes, bbox2result - from mmdet.apis import show_result_pyplot - - ort_custom_op_path = '' - try: - from mmcv.ops import get_onnxruntime_op_path - ort_custom_op_path = get_onnxruntime_op_path() - except (ImportError, ModuleNotFoundError): - warnings.warn('If input model has custom op from mmcv, \ - you may have to build mmcv with ONNXRuntime from source.') - model.CLASSES = get_classes(dataset) - num_classes = len(model.CLASSES) - # check by onnx - onnx_model = onnx.load(output_file) - onnx.checker.check_model(onnx_model) - if test_img is not None: - input_config['input_path'] = test_img - one_img, one_meta = preprocess_example_input(input_config) - tensor_data = [one_img] - # check the numerical value - # get pytorch output - pytorch_results = model(tensor_data, [[one_meta]], return_loss=False) - pytorch_results = pytorch_results[0] - # get onnx output - input_all = [node.name for node in onnx_model.graph.input] - input_initializer = [ - node.name for node in onnx_model.graph.initializer - ] - net_feed_input = list(set(input_all) - set(input_initializer)) - assert (len(net_feed_input) == 1) - session_options = rt.SessionOptions() - # register 
custom op for onnxruntime - if osp.exists(ort_custom_op_path): - session_options.register_custom_ops_library(ort_custom_op_path) - sess = rt.InferenceSession(output_file, session_options) - onnx_outputs = sess.run(None, - {net_feed_input[0]: one_img.detach().numpy()}) - output_names = [_.name for _ in sess.get_outputs()] - output_shapes = [_.shape for _ in onnx_outputs] - print(f'onnxruntime output names: {output_names}, \ - output shapes: {output_shapes}') - nrof_out = len(onnx_outputs) - assert nrof_out > 0, 'Must have output' - with_mask = nrof_out == 3 - if nrof_out == 1: - onnx_results = onnx_outputs[0] - else: - det_bboxes, det_labels = onnx_outputs[:2] - onnx_results = bbox2result(det_bboxes, det_labels, num_classes) - if with_mask: - segm_results = onnx_outputs[2].squeeze(1) - cls_segms = [[] for _ in range(num_classes)] - for i in range(det_bboxes.shape[0]): - cls_segms[det_labels[i]].append(segm_results[i]) - onnx_results = (onnx_results, cls_segms) - # visualize predictions - - if show: - show_result_pyplot( - model, one_meta['show_img'], pytorch_results, title='Pytorch') - show_result_pyplot( - model, one_meta['show_img'], onnx_results, title='ONNX') - - # compare a part of result - - if with_mask: - compare_pairs = list(zip(onnx_results, pytorch_results)) - else: - compare_pairs = [(onnx_results, pytorch_results)] - for onnx_res, pytorch_res in compare_pairs: - for o_res, p_res in zip(onnx_res, pytorch_res): - np.testing.assert_allclose( - o_res, - p_res, - rtol=1e-03, - atol=1e-05, - ) - print('The numerical values are the same between Pytorch and ONNX') - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Convert MMDetection models to ONNX') - parser.add_argument('config', help='test config file path') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument('--input-img', type=str, help='Images for input') - parser.add_argument('--show', action='store_true', help='show onnx graph') - parser.add_argument('--output-file', type=str, default='tmp.onnx') - parser.add_argument('--opset-version', type=int, default=11) - parser.add_argument( - '--test-img', type=str, default=None, help='Images for test') - parser.add_argument( - '--dataset', type=str, default='coco', help='Dataset name') - parser.add_argument( - '--verify', - action='store_true', - help='verify the onnx model output against pytorch output') - parser.add_argument( - '--simplify', - action='store_true', - help='Whether to simplify onnx model.') - parser.add_argument( - '--shape', - type=int, - nargs='+', - default=[800, 1216], - help='input image size') - parser.add_argument( - '--mean', - type=float, - nargs='+', - default=[123.675, 116.28, 103.53], - help='mean value used for preprocess input data') - parser.add_argument( - '--std', - type=float, - nargs='+', - default=[58.395, 57.12, 57.375], - help='variance value used for preprocess input data') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - return args - - -if __name__ == '__main__': - args = parse_args() - - assert args.opset_version == 11, 'MMDet only support opset 11 now' - - if not args.input_img: - args.input_img = osp.join( - osp.dirname(__file__), '../../tests/data/color.jpg') - - if len(args.shape) == 1: - input_shape = (1, 3, args.shape[0], args.shape[0]) - elif len(args.shape) == 2: - input_shape = (1, 3) + tuple(args.shape) - else: - raise ValueError('invalid input shape') - - assert len(args.mean) == 3 - assert len(args.std) == 3 - - normalize_cfg = {'mean': args.mean, 'std': args.std} - - # convert model to onnx file - pytorch2onnx( - args.config, - args.checkpoint, - args.input_img, - input_shape, - opset_version=args.opset_version, - show=args.show, - output_file=args.output_file, - verify=args.verify, - normalize_cfg=normalize_cfg, - dataset=args.dataset, - test_img=args.test_img, - do_simplify=args.simplify, - cfg_options=args.cfg_options) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py deleted file mode 100644 index 99760c36d8399204ca8e35f32690bcd369676852..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py deleted file mode 100644 index 7f8710d4be4ee0664f644b9037fd4653e4655907..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fpn_r50_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/cmd_wsl.bat b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/cmd_wsl.bat deleted file mode 100644 index f9f4348a4672d1981b2648c55b861cb0fb6f5598..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/cmd_wsl.bat +++ /dev/null @@ -1,11 +0,0 @@ -@echo off - -cd /D "%~dp0" - -set PATH=%PATH%;%SystemRoot%\system32 - -@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script -call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh cmd" - -:end -pause diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/silero_tts/tts_preprocessor.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/silero_tts/tts_preprocessor.py deleted file mode 100644 index daefdcbda6c9b20a87c6f3d84d2a759c2c51289c..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/silero_tts/tts_preprocessor.py +++ /dev/null @@ -1,200 +0,0 @@ -import re - -from num2words import num2words - -punctuation = r'[\s,.?!/)\'\]>]' -alphabet_map = { - "A": " Ei ", - "B": " Bee ", - "C": " See ", - "D": " Dee ", - "E": " Eee ", - "F": " Eff ", - "G": " Jee ", - 
"H": " Eich ", - "I": " Eye ", - "J": " Jay ", - "K": " Kay ", - "L": " El ", - "M": " Emm ", - "N": " Enn ", - "O": " Ohh ", - "P": " Pee ", - "Q": " Queue ", - "R": " Are ", - "S": " Ess ", - "T": " Tee ", - "U": " You ", - "V": " Vee ", - "W": " Double You ", - "X": " Ex ", - "Y": " Why ", - "Z": " Zed " # Zed is weird, as I (da3dsoul) am American, but most of the voice models sound British, so it matches -} - - -def preprocess(string): - # the order for some of these matter - # For example, you need to remove the commas in numbers before expanding them - string = remove_surrounded_chars(string) - string = string.replace('"', '') - string = string.replace('\u201D', '').replace('\u201C', '') # right and left quote - string = string.replace('\u201F', '') # italic looking quote - string = string.replace('\n', ' ') - string = convert_num_locale(string) - string = replace_negative(string) - string = replace_roman(string) - string = hyphen_range_to(string) - string = num_to_words(string) - - # TODO Try to use a ML predictor to expand abbreviations. It's hard, dependent on context, and whether to actually - # try to say the abbreviation or spell it out as I've done below is not agreed upon - - # For now, expand abbreviations to pronunciations - # replace_abbreviations adds a lot of unnecessary whitespace to ensure separation - string = replace_abbreviations(string) - string = replace_lowercase_abbreviations(string) - - # cleanup whitespaces - # remove whitespace before punctuation - string = re.sub(rf'\s+({punctuation})', r'\1', string) - string = string.strip() - # compact whitespace - string = ' '.join(string.split()) - - return string - - -def remove_surrounded_chars(string): - # first this expression will check if there is a string nested exclusively between a alt= - # and a style= string. This would correspond to only a the alt text of an embedded image - # If it matches it will only keep that part as the string, and rend it for further processing - # Afterwards this expression matches to 'as few symbols as possible (0 upwards) between any - # asterisks' OR' as few symbols as possible (0 upwards) between an asterisk and the end of the string' - if re.search(r'(?<=alt=)(.*)(?=style=)', string, re.DOTALL): - m = re.search(r'(?<=alt=)(.*)(?=style=)', string, re.DOTALL) - string = m.group(0) - return re.sub(r'\*[^*]*?(\*|$)', '', string) - - -def convert_num_locale(text): - # This detects locale and converts it to American without comma separators - pattern = re.compile(r'(?:\s|^)\d{1,3}(?:\.\d{3})+(,\d+)(?:\s|$)') - result = text - while True: - match = pattern.search(result) - if match is None: - break - - start = match.start() - end = match.end() - result = result[0:start] + result[start:end].replace('.', '').replace(',', '.') + result[end:len(result)] - - # removes comma separators from existing American numbers - pattern = re.compile(r'(\d),(\d)') - result = pattern.sub(r'\1\2', result) - - return result - - -def replace_negative(string): - # handles situations like -5. -5 would become negative 5, which would then be expanded to negative five - return re.sub(rf'(\s)(-)(\d+)({punctuation})', r'\1negative \3\4', string) - - -def replace_roman(string): - # find a string of roman numerals. 
- # Only 2 or more, to avoid capturing I and single character abbreviations, like names - pattern = re.compile(rf'\s[IVXLCDM]{{2,}}{punctuation}') - result = string - while True: - match = pattern.search(result) - if match is None: - break - - start = match.start() - end = match.end() - result = result[0:start + 1] + str(roman_to_int(result[start + 1:end - 1])) + result[end - 1:len(result)] - - return result - - -def roman_to_int(s): - rom_val = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000} - int_val = 0 - for i in range(len(s)): - if i > 0 and rom_val[s[i]] > rom_val[s[i - 1]]: - int_val += rom_val[s[i]] - 2 * rom_val[s[i - 1]] - else: - int_val += rom_val[s[i]] - return int_val - - -def hyphen_range_to(text): - pattern = re.compile(r'(\d+)[-–](\d+)') - result = pattern.sub(lambda x: x.group(1) + ' to ' + x.group(2), text) - return result - - -def num_to_words(text): - # 1000 or 10.23 - pattern = re.compile(r'\d+\.\d+|\d+') - result = pattern.sub(lambda x: num2words(float(x.group())), text) - return result - - -def replace_abbreviations(string): - # abbreviations 1 to 4 characters long. It will get things like A and I, but those are pronounced with their letter - pattern = re.compile(rf'(^|[\s(.\'\[<])([A-Z]{{1,4}})({punctuation}|$)') - result = string - while True: - match = pattern.search(result) - if match is None: - break - - start = match.start() - end = match.end() - result = result[0:start] + replace_abbreviation(result[start:end]) + result[end:len(result)] - - return result - - -def replace_lowercase_abbreviations(string): - # abbreviations 1 to 4 characters long, separated by dots i.e. e.g. - pattern = re.compile(rf'(^|[\s(.\'\[<])(([a-z]\.){{1,4}})({punctuation}|$)') - result = string - while True: - match = pattern.search(result) - if match is None: - break - - start = match.start() - end = match.end() - result = result[0:start] + replace_abbreviation(result[start:end].upper()) + result[end:len(result)] - - return result - - -def replace_abbreviation(string): - result = "" - for char in string: - result += match_mapping(char) - - return result - - -def match_mapping(char): - for mapping in alphabet_map.keys(): - if char == mapping: - return alphabet_map[char] - - return char - - -def __main__(args): - print(preprocess(args[1])) - - -if __name__ == "__main__": - import sys - __main__(sys.argv) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/memory.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/memory.py deleted file mode 100644 index 70cf9a838fb314e3bd3c07aadbc00921a81e83ed..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/memory.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import torch - -from .hook import HOOKS, Hook - - -@HOOKS.register_module() -class EmptyCacheHook(Hook): - - def __init__(self, before_epoch=False, after_epoch=True, after_iter=False): - self._before_epoch = before_epoch - self._after_epoch = after_epoch - self._after_iter = after_iter - - def after_iter(self, runner): - if self._after_iter: - torch.cuda.empty_cache() - - def before_epoch(self, runner): - if self._before_epoch: - torch.cuda.empty_cache() - - def after_epoch(self, runner): - if self._after_epoch: - torch.cuda.empty_cache() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/appdirs.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/appdirs.py deleted file mode 100644 index ae67001af8b661373edeee2eb327b9f63e630d62..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/appdirs.py +++ /dev/null @@ -1,608 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See for details and usage. -""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version_info__ = (1, 4, 3) -__version__ = '.'.join(map(str, __version_info__)) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. 
- - Typical user data directories are: - Mac OS X: ~/Library/Application Support/ - Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\\Application Data\\ - Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ - Win 7 (not roaming): C:\Users\\AppData\Local\\ - Win 7 (roaming): C:\Users\\AppData\Roaming\\ - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/". - """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/', - if XDG_DATA_DIRS is not set - - Typical site data directories are: - Mac OS X: /Library/Application Support/ - Unix: /usr/local/share/ or /usr/share/ - Win XP: C:\Documents and Settings\All Users\Application Data\\ - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. 
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user config directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. 
By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set - - Typical site config directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/ - Unix: ~/.cache/ (XDG default) - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache - Vista: C:\Users\\AppData\Local\\\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific state dir for this application. 
- - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user state directories are: - Mac OS X: same as user_data_dir - Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow this Debian proposal - to extend the XDG spec and support $XDG_STATE_HOME. - - That means, by default "~/.local/state/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user log directories are: - Mac OS X: ~/Library/Logs/ - Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs - Vista: C:\Users\\AppData\Local\\\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. 
- """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname=None, appauthor=None, version=None, - roaming=False, multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_state_dir(self): - return user_state_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - if PY3: - import winreg as _winreg - else: - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # . 
- has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernel.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - import win32com.shell - _get_win_folder = _get_win_folder_with_pywin32 - except ImportError: - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "site_data_dir", - "site_config_dir") - - print("-- app dirs %s --" % __version__) - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/py34compat.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/py34compat.py deleted file mode 100644 index 3ad917222a4e5bb93fe1c9e8fe1713bcab3630b6..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/py34compat.py +++ /dev/null @@ -1,13 +0,0 @@ -import importlib - -try: - import importlib.util -except ImportError: - pass - - -try: - module_from_spec = importlib.util.module_from_spec -except AttributeError: - def module_from_spec(spec): - return spec.loader.load_module(spec.name) diff --git a/spaces/Bakar31/MLOps_Practice_Repo_1/README.md b/spaces/Bakar31/MLOps_Practice_Repo_1/README.md deleted file mode 100644 index b0d23764ee9f2237ed005d41da9dab7ef8e822ac..0000000000000000000000000000000000000000 --- a/spaces/Bakar31/MLOps_Practice_Repo_1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -license: cc -title: News Summarizer -sdk: gradio -emoji: 📚 -colorFrom: indigo -colorTo: blue ---- - -# MLOps-Practice-Repo-1 - -source ~/.venv/bin/activate \ No newline at end of file 
diff --git a/spaces/Banbri/zcvzcv/src/lib/useImageDimension.ts b/spaces/Banbri/zcvzcv/src/lib/useImageDimension.ts deleted file mode 100644 index 9cfd06e473929b1046a5dd9caa9d577ebaf09b7a..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/lib/useImageDimension.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { useEffect, useState } from "react" - -import { ImageDimension, getImageDimension } from "./getImageDimension" - -export function useImageDimension(src: string) { - const [dimension, setDimension] = useState({ - width: 0, - height: 0, - }) - - useEffect(() => { - const compute = async () => { - const newDimension = await getImageDimension(src) - setDimension(newDimension) - } - compute() - }, [src]) - - return dimension -} \ No newline at end of file diff --git a/spaces/BartPoint/VoiceChange/infer_pack/commons.py b/spaces/BartPoint/VoiceChange/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/BartPoint/VoiceChange/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, 
length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/Benson/text-generation/Examples/Car Drift Game Download Apkpure.md b/spaces/Benson/text-generation/Examples/Car Drift Game Download Apkpure.md deleted file mode 100644 index 3a7ab587d8ac3fbcc2bc1d427c71135d37918c46..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Car Drift Game Download Apkpure.md +++ /dev/null @@ -1,58 +0,0 @@ -
-

Car Drift Game Download Apkpure: How to Enjoy Realistic Drifting on Your Android Device

-

If you are a fan of racing games and want to experience the thrill of drifting on your Android device, you should try Car Drift Game. It is a realistic and fun drift simulator that lets you burn rubber on the asphalt and pull off impressive stunts. In this article, we will explain what Car Drift Game is, how to download it from Apkpure, and how to play it on your Android device.

-

What Is Car Drift Game?

-

Car Drift Game is a popular racing game centered on drifting, a driving technique in which the driver intentionally oversteers the car so that it slides sideways. Drifting is often used in motorsport and street racing, as it creates spectacular effects and shows off the driver's skill.

-

-

The Features of Car Drift Game

-

Car Drift Game has many features that make it one of the best drift games for Android devices. Some of them are:

-
    -
  • Realistic physics and graphics that simulate the behavior and look of real cars and tracks.
  • -
  • A variety of cars to choose from, each with different characteristics and customization options.
  • -
  • A selection of drift tracks, ranging from city streets to mountain roads.
  • -
  • A dynamic weather system that affects driving conditions and visibility.
  • -
  • A replay mode that lets you watch your drifts from different angles and share them with your friends.
  • -
-

The Benefits of Car Drift Game

-

Car Drift Game is not just fun and exciting; it is also good for you. Some of the benefits of playing it are:

-
    -
  • It improves your hand-eye coordination and reflexes, since you have to control the car and react to a changing environment.
  • -
  • It sharpens your creativity and problem-solving skills, since you have to find the best way to drift and get past obstacles.
  • -
  • It reduces stress and boredom, since you can immerse yourself in the game and forget your worries.
  • -
-

How to Download Car Drift Game from Apkpure?

-

If you want to download Car Drift Game to your Android device, one of the best sources is Apkpure. Apkpure is a website that provides free and safe APK files for Android apps and games. APK files are the installation packages for Android apps, and they can be downloaded and installed manually without using the Google Play Store.

-

The Steps to Download Car Drift Game from Apkpure

-

To download Car Drift Game from Apkpure, follow these steps:

-
    -
  1. Go to https://apkpure.com/carx-drift-racing/com.CarXTech.CarXDriftRacingFull, which is the official page of Car Drift Game on Apkpure.
  2. -
  3. Click the "Download APK" button, which will start downloading the Car Drift Game APK file to your device.
  4. -
  5. Once the download is complete, locate the APK file on your device and tap it to install it. You may need to enable "Unknown sources" in your device settings to allow the installation.
  6. -
  7. After installation, you can launch the game and start drifting on your Android device. (A command-line alternative is sketched below.)
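If you would rather install the APK from a computer instead of tapping the file on the phone, a common alternative is Android's adb tool. This is a sketch, not something the article itself describes: it assumes adb is installed, USB debugging is enabled on the device, and the file name below is hypothetical.

    # install the downloaded APK onto the connected device over USB
    adb install car-drift-game.apk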

    -

    The Controls of Car Drift Game

    -

    Car Drift Game has simple, intuitive controls that let you drift with ease. You can choose between two control modes: tilt or touch. In tilt mode, you steer the car by tilting your device left or right. In touch mode, you steer by tapping the left or right side of the screen. You can also adjust the sensitivity and angle of the tilt or touch in the settings menu.

    -

    - -

    To change the camera view, tap the camera icon in the top-right corner of the screen. You can choose between four camera views: cockpit, hood, bumper, and chase. Each view has its own advantages and disadvantages, depending on your preference and the situation.

    -

    The Modes of Car Drift Game

    -

    Car Drift Game has three modes to choose from: career, single player, and multiplayer. Each mode has its own challenges and rewards.

    -
      -
  • Career mode: Progress through various levels and events, completing different objectives and earning stars. The more stars you earn, the more cars and tracks you unlock. You can also upgrade your cars and customize their appearance in this mode.
  • -
  • Single-player mode: Practice your drifting skills and set your own records on any track you like. You can also adjust the difficulty and the number of opponents in this mode.
  • -
  • Multiplayer mode: Compete with other players online in real-time races and tournaments. You can also chat with other players and join clubs in this mode.
    • -
    -

    The Tips and Tricks of Car Drift Game

    -

    Car Drift Game takes skill and practice to master. Here are some tips and tricks that can help you improve your performance and score:

    -
      -
  • Choose a car that suits your style and preference. Different cars have different attributes, such as speed, acceleration, handling, weight, and drift ability. You can also tune your car to optimize its performance for drifting.
  • -
  • Learn to use the handbrake effectively. The handbrake is essential for drifting, as it helps you start and control drifts. You can use it to enter corners at high speed, adjust your angle and direction mid-drift, and exit corners smoothly.
  • -
  • Watch your drift angle and speed. The drift angle is the angle between the direction your car is pointing and the direction it is moving; the speed is how fast the car is going. Both factors affect your drift score, which is calculated by multiplying your drift angle by your speed, so aim for a high angle at high speed (see the worked example after this list).
  • -
  • Practice on different tracks and conditions. Car Drift Game offers a variety of tracks and conditions to challenge your drifting skills. Practice on different tracks to learn their layouts, features, and hazards, and adapt to weather effects such as rain, snow, fog, and night.
    • -
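As a rough worked example of the scoring rule mentioned above (the multiplicative formula comes from the tip itself; the units and any scaling constant are assumptions, since the game does not document them):

    drift score ≈ drift angle × speed
    e.g. a 30° drift held at 80 km/h → about 30 × 80 = 2,400 points

The practical takeaway is that doubling either the angle or the speed doubles the score, so committed, fast slides are worth far more than shallow, slow ones.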
    -

    Conclusion

    -

    In short, Car Drift Game is a realistic and fun drift simulator that lets you burn rubber on the asphalt and perform amazing stunts on your Android device. You can download it from Apkpure, which offers free and safe APK files. If you have any questions or feedback, you can visit the developer's website at https://carx-tech.com/ or email them at support@carx-tech.com.

    -

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Camin Simulador ltimo Para Ventanas 10.md b/spaces/Benson/text-generation/Examples/Descargar Camin Simulador ltimo Para Ventanas 10.md deleted file mode 100644 index dccb69a4ad6a2ed933c124f5de4b82348490f4c6..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Camin Simulador ltimo Para Ventanas 10.md +++ /dev/null @@ -1,97 +0,0 @@ - -

How to Download Truck Simulator Ultimate for Windows 10

-

Do you love driving trucks and exploring different countries? Do you want to experience the thrill of running your own transport company and managing your fleet? If so, you should try Truck Simulator Ultimate, a realistic and immersive truck simulation game that lets you travel the world in your truck and complete various missions.

-

-

In this article, we will show you what Truck Simulator Ultimate is, what its features and benefits are, and how to download it for Windows 10. We will also share some tips and tricks for playing the game on your PC. Let's get started!

-

What Is Truck Simulator Ultimate?

-

Truck Simulator Ultimate is a simulation game developed by Zuuks Games, the creators of Bus Simulator: Ultimate. The game features officially licensed Mercedes-Benz trucks and lets you haul a wide variety of cargo across more than 100 cities around the world. You can also take part in multiplayer seasons, where you can carry joint cargo or compete in races with other players.

-

The game also has a tycoon element: you can found your own transport company, hire employees, expand your fleet, design your offices, and grow into the largest logistics company in the world. You can operate in countries such as the United States, China, Canada, Russia, Germany, Italy, France, Spain, the Netherlands, Turkey, South Korea, Japan, Brazil, Azerbaijan, and more.

-

-

Features of Truck Simulator Ultimate

-

Some of the features of Truck Simulator Ultimate are:

-
    -
  • DLC mod system: Customize your trucks with accessories such as lamps, bumpers, horns, cabin lights, and more.
  • -
  • Detailed cockpits: Enjoy realistic driving physics and controls from inside your truck's cab.
  • -
  • Support for more than 25 languages: Play the game in your preferred language.
  • -
  • More than 250 radio stations: Listen to your favorite music while you drive.
  • -
  • Toll roads: Pay tolls to use faster, safer highways.
  • -
  • Realistic weather: Experience different conditions such as rain, snow, and fog.
  • -
  • Village, city, and highway roads: Drive on different road types with varying traffic and scenery.
  • -
-

Benefits of Playing Truck Simulator Ultimate on PC

-

While Truck Simulator Ultimate is available for Android and iOS devices, playing it on PC has some advantages. Here are a few of them:

-
    -
  • Better graphics and performance: Enjoy the game's impressive visuals and smooth gameplay on a bigger screen at a higher resolution.
  • -
  • Easier controls: Use the keyboard and mouse to control your truck with more comfort and precision.
  • -
  • More storage space: You don't have to worry about running out of space on your mobile device, since the game files live on your PC's hard drive.
  • -
  • No battery drain or overheating: You don't have to worry about your mobile device's battery draining or getting too hot during long play sessions.
  • -
-

How to Download Truck Simulator Ultimate for Windows 10

-

If you want to play Truck Simulator Ultimate on your Windows 10 PC, you will need to meet some system requirements first. Then you can choose between two methods to download the game: using the Google Play Store or using the BlueStacks App Player.

-

System Requirements for Windows 10

-

Before downloading Truck Simulator Ultimate for Windows 10, make sure your PC meets the following minimum system requirements:

-
    Operating system: Windows 10 (64-bit)
    Processor:        Intel Core i3-2100 or AMD FX-6300
    Memory:           4 GB of RAM
    Graphics:         NVIDIA GeForce GTX 750 Ti or AMD Radeon HD 7870
    Storage:          5 GB of available space

Steps to Download Truck Simulator Ultimate for Windows 10

-

There are two ways to download Truck Simulator Ultimate for Windows 10: using the Google Play Store or using the BlueStacks App Player. Here are the steps for each method:

-

Using the Google Play Store

-
    -
  1. Open your web browser and go to https://play.google.com/store/apps/details?id=com.zuuks.truck.simulator.ultimate&hl=en_US&gl=US.
  2. -
  3. Click the Install button and sign in with your Google account.
  4. -
  5. The game will start downloading and installing on your PC.
  6. -
  7. Once the installation is complete, you can launch the game from the Google Play Store app or from the shortcut on your desktop.
  8. -
-

Using the BlueStacks App Player

-
    -
  1. Download and install the BlueStacks App Player from https://www.bluestacks.com/.
  2. -
  3. Launch BlueStacks and sign in with your Google account.
  4. -
  5. Go to the My Apps tab and click the Google Play Store icon.
  6. -
  7. Search for Truck Simulator Ultimate and click the Install button.
  8. -
  9. The game will start downloading and installing on your PC.
  10. -
  11. Once the installation is complete, you can launch the game from the BlueStacks home screen or from the shortcut on your desktop.
  12. -
-

Tips and Tricks for Playing Truck Simulator Ultimate on PC

-

To get the most out of your trucking experience, here are some tips and tricks for playing Truck Simulator Ultimate on PC:

-
    - -
  • Fuel up and repair your truck regularly: You don't want to run out of gas or break down in the middle of the road. Keep an eye on your fuel gauge and damage indicator, and stop at gas stations and repair shops when needed.
  • -
  • Follow the traffic rules and speed limits: You don't want to get fined or cause accidents. Obey traffic lights, signs, signals, and speed limits, and avoid crashing into other vehicles, pedestrians, or objects.
  • -
  • Earn money and XP by completing missions and challenges: You can earn money and XP by hauling cargo, taking part in multiplayer seasons, and completing daily tasks, achievements, and events. Use the money to buy new trucks, upgrade your fleet, hire employees, and expand your company; use the XP to level up and unlock new features and rewards.
  • -
  • Create a realistic profile and customize your truck: You can build a realistic profile by choosing your name, country, flag, logo, license plate, and so on. You can also customize your truck with various DLC mods such as lamps, bumpers, horns, cabin lights, etc.
  • -
-

Conclusion

-

In conclusion, Truck Simulator Ultimate is a fun and realistic truck simulation game that lets you travel the world in your truck and run your own transport company. You can download it for Windows 10 using the Google Play Store or the BlueStacks App Player, and you can follow the tips above to improve your game. We hope you enjoy playing Truck Simulator Ultimate on PC!

-

FAQs

Aquí hay algunas preguntas frecuentes sobre Truck Simulator Ultimate:

-
    -
  1. ¿Cómo puedo jugar Truck Simulator Ultimate con mis amigos?
  2. - -
  3. ¿Cómo puedo cambiar la vista de la cámara en Truck Simulator Ultimate?
  4. -

    Puede cambiar la vista de la cámara en Truck Simulator Ultimate presionando la tecla C del teclado. Puede elegir entre diferentes ángulos de cámara, como cabina, parte delantera, trasera, lateral, superior, etc.

    -
  5. ¿Cómo puedo guardar mi progreso en Truck Simulator Ultimate?
  6. -

    Puede guardar su progreso en Truck Simulator Ultimate iniciando sesión con su cuenta de Google. El juego sincronizará automáticamente tus datos con la nube. También puede hacer copias de seguridad de sus datos manualmente yendo al menú de configuración y haciendo clic en el botón de copia de seguridad.

    -
  7. ¿Cómo puedo actualizar Truck Simulator Ultimate en PC?
  8. -

    Puede actualizar Truck Simulator Ultimate en PC siguiendo estos pasos:

    -
      -
    • Si has descargado el juego desde Google Play Store, puedes buscar actualizaciones abriendo la aplicación Google Play Store y haciendo clic en el icono del menú. Luego, ve a Mis aplicaciones y juegos y encuentra Truck Simulator Ultimate. Si hay una actualización disponible, haga clic en el botón Update.
    • -
    • Si ha descargado el juego de BlueStacks App Player, puede comprobar si hay actualizaciones abriendo la aplicación BlueStacks y haciendo clic en el icono del menú. Luego, ve a App Center y encuentra Truck Simulator Ultimate. Si hay una actualización disponible, haga clic en el botón Update.
    • -
    -
  9. ¿Cómo puedo contactar a los desarrolladores de Truck Simulator Ultimate?
  10. -

    Puede ponerse en contacto con los desarrolladores de Truck Simulator Ultimate enviando un correo electrónico a info@zuuks.com. También puedes seguirlos en sus cuentas de redes sociales como Facebook, Twitter, Instagram y YouTube.

    -

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/core.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/core.py deleted file mode 100644 index 9acba3f3e984b404f52702964805732f03965048..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/core.py +++ /dev/null @@ -1,5814 +0,0 @@ -# -# core.py -# -import os -import typing -from typing import ( - NamedTuple, - Union, - Callable, - Any, - Generator, - Tuple, - List, - TextIO, - Set, - Sequence, -) -from abc import ABC, abstractmethod -from enum import Enum -import string -import copy -import warnings -import re -import sys -from collections.abc import Iterable -import traceback -import types -from operator import itemgetter -from functools import wraps -from threading import RLock -from pathlib import Path - -from .util import ( - _FifoCache, - _UnboundedCache, - __config_flags, - _collapse_string_to_ranges, - _escape_regex_range_chars, - _bslash, - _flatten, - LRUMemo as _LRUMemo, - UnboundedMemo as _UnboundedMemo, -) -from .exceptions import * -from .actions import * -from .results import ParseResults, _ParseResultsWithOffset -from .unicode import pyparsing_unicode - -_MAX_INT = sys.maxsize -str_type: Tuple[type, ...] = (str, bytes) - -# -# Copyright (c) 2003-2022 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - - -if sys.version_info >= (3, 8): - from functools import cached_property -else: - - class cached_property: - def __init__(self, func): - self._func = func - - def __get__(self, instance, owner=None): - ret = instance.__dict__[self._func.__name__] = self._func(instance) - return ret - - -class __compat__(__config_flags): - """ - A cross-version compatibility configuration for pyparsing features that will be - released in a future version. By setting values in this configuration to True, - those features can be enabled in prior versions for compatibility development - and testing. 
- - - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping - of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; - maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 - behavior - """ - - _type_desc = "compatibility" - - collect_all_And_tokens = True - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _fixed_names = """ - collect_all_And_tokens - """.split() - - -class __diag__(__config_flags): - _type_desc = "diagnostic" - - warn_multiple_tokens_in_named_alternation = False - warn_ungrouped_named_tokens_in_collection = False - warn_name_set_on_empty_Forward = False - warn_on_parse_using_empty_Forward = False - warn_on_assignment_to_Forward = False - warn_on_multiple_string_args_to_oneof = False - warn_on_match_first_with_lshift_operator = False - enable_debug_on_named_expressions = False - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _warning_names = [name for name in _all_names if name.startswith("warn")] - _debug_names = [name for name in _all_names if name.startswith("enable_debug")] - - @classmethod - def enable_all_warnings(cls) -> None: - for name in cls._warning_names: - cls.enable(name) - - -class Diagnostics(Enum): - """ - Diagnostic configuration (all default to disabled) - - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results - name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions - - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results - name is defined on a containing expression with ungrouped subexpressions that also - have results names - - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined - with a results name, but has no contents defined - - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is - defined in a grammar but has never had an expression attached to it - - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined - but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` - - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is - incorrectly called with multiple str arguments - - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent - calls to :class:`ParserElement.set_name` - - Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. - All warnings can be enabled by calling :class:`enable_all_warnings`. - """ - - warn_multiple_tokens_in_named_alternation = 0 - warn_ungrouped_named_tokens_in_collection = 1 - warn_name_set_on_empty_Forward = 2 - warn_on_parse_using_empty_Forward = 3 - warn_on_assignment_to_Forward = 4 - warn_on_multiple_string_args_to_oneof = 5 - warn_on_match_first_with_lshift_operator = 6 - enable_debug_on_named_expressions = 7 - - -def enable_diag(diag_enum: Diagnostics) -> None: - """ - Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.enable(diag_enum.name) - - -def disable_diag(diag_enum: Diagnostics) -> None: - """ - Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.disable(diag_enum.name) - - -def enable_all_warnings() -> None: - """ - Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). 
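A minimal usage sketch of these diagnostic switches from client code (assuming pyparsing 3.x imported as ``pp``; the particular flag chosen here is arbitrary)::

    import pyparsing as pp

    # enable one specific diagnostic...
    pp.enable_diag(pp.Diagnostics.warn_on_assignment_to_Forward)
    # ...or turn on every warning-class diagnostic at once
    pp.enable_all_warnings()
    # individual diagnostics can be switched back off
    pp.disable_diag(pp.Diagnostics.warn_on_assignment_to_Forward)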
- """ - __diag__.enable_all_warnings() - - -# hide abstract class -del __config_flags - - -def _should_enable_warnings( - cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] -) -> bool: - enable = bool(warn_env_var) - for warn_opt in cmd_line_warn_options: - w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( - ":" - )[:5] - if not w_action.lower().startswith("i") and ( - not (w_message or w_category or w_module) or w_module == "pyparsing" - ): - enable = True - elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): - enable = False - return enable - - -if _should_enable_warnings( - sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") -): - enable_all_warnings() - - -# build list of single arg builtins, that can be used as parse actions -_single_arg_builtins = { - sum, - len, - sorted, - reversed, - list, - tuple, - set, - any, - all, - min, - max, -} - -_generatorType = types.GeneratorType -ParseAction = Union[ - Callable[[], Any], - Callable[[ParseResults], Any], - Callable[[int, ParseResults], Any], - Callable[[str, int, ParseResults], Any], -] -ParseCondition = Union[ - Callable[[], bool], - Callable[[ParseResults], bool], - Callable[[int, ParseResults], bool], - Callable[[str, int, ParseResults], bool], -] -ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] -DebugStartAction = Callable[[str, int, "ParserElement", bool], None] -DebugSuccessAction = Callable[ - [str, int, int, "ParserElement", ParseResults, bool], None -] -DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] - - -alphas = string.ascii_uppercase + string.ascii_lowercase -identchars = pyparsing_unicode.Latin1.identchars -identbodychars = pyparsing_unicode.Latin1.identbodychars -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -printables = "".join([c for c in string.printable if c not in string.whitespace]) - -_trim_arity_call_line: traceback.StackSummary = None - - -def _trim_arity(func, max_limit=3): - """decorator to trim function calls to match the arity of the target""" - global _trim_arity_call_line - - if func in _single_arg_builtins: - return lambda s, l, t: func(t) - - limit = 0 - found_arity = False - - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [frame_summary[:2]] - - # synthesize what would be returned by traceback.extract_stack at the call to - # user's parse action 'func', so that we don't incur call penalty at parse time - - # fmt: off - LINE_DIFF = 7 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
- _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1]) - pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) - - def wrapper(*args): - nonlocal found_arity, limit - while 1: - try: - ret = func(*args[limit:]) - found_arity = True - return ret - except TypeError as te: - # re-raise TypeErrors if they did not come from our arity testing - if found_arity: - raise - else: - tb = te.__traceback__ - trim_arity_type_error = ( - extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth - ) - del tb - - if trim_arity_type_error: - if limit < max_limit: - limit += 1 - continue - - raise - # fmt: on - - # copy func name to wrapper for sensible debug output - # (can't use functools.wraps, since that messes with function signature) - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - wrapper.__name__ = func_name - wrapper.__doc__ = func.__doc__ - - return wrapper - - -def condition_as_parse_action( - fn: ParseCondition, message: str = None, fatal: bool = False -) -> ParseAction: - """ - Function to convert a simple predicate function that returns ``True`` or ``False`` - into a parse action. Can be used in places when a parse action is required - and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition - to an operator level in :class:`infix_notation`). - - Optional keyword arguments: - - - ``message`` - define a custom message to be used in the raised exception - - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; - otherwise will raise :class:`ParseException` - - """ - msg = message if message is not None else "failed user-defined condition" - exc_type = ParseFatalException if fatal else ParseException - fn = _trim_arity(fn) - - @wraps(fn) - def pa(s, l, t): - if not bool(fn(s, l, t)): - raise exc_type(s, l, msg) - - return pa - - -def _default_start_debug_action( - instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False -): - cache_hit_str = "*" if cache_hit else "" - print( - ( - "{}Match {} at loc {}({},{})\n {}\n {}^".format( - cache_hit_str, - expr, - loc, - lineno(loc, instring), - col(loc, instring), - line(loc, instring), - " " * (col(loc, instring) - 1), - ) - ) - ) - - -def _default_success_debug_action( - instring: str, - startloc: int, - endloc: int, - expr: "ParserElement", - toks: ParseResults, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list())) - - -def _default_exception_debug_action( - instring: str, - loc: int, - expr: "ParserElement", - exc: Exception, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print( - "{}Match {} failed, {} raised: {}".format( - cache_hit_str, expr, type(exc).__name__, exc - ) - ) - - -def null_debug_action(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - - -class ParserElement(ABC): - """Abstract base level parser element class.""" - - DEFAULT_WHITE_CHARS: str = " \n\t\r" - verbose_stacktrace: bool = False - _literalStringClass: typing.Optional[type] = None - - @staticmethod - def set_default_whitespace_chars(chars: str) -> None: - r""" - Overrides the default whitespace chars - - Example:: - - # default whitespace chars are space, and newline - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - 
ParserElement.set_default_whitespace_chars(" \t") - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - # update whitespace all parse expressions defined in this module - for expr in _builtin_exprs: - if expr.copyDefaultWhiteChars: - expr.whiteChars = set(chars) - - @staticmethod - def inline_literals_using(cls: type) -> None: - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inline_literals_using(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - class DebugActions(NamedTuple): - debug_try: typing.Optional[DebugStartAction] - debug_match: typing.Optional[DebugSuccessAction] - debug_fail: typing.Optional[DebugExceptionAction] - - def __init__(self, savelist: bool = False): - self.parseAction: List[ParseAction] = list() - self.failAction: typing.Optional[ParseFailAction] = None - self.customName = None - self._defaultName = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - # used when checking for left-recursion - self.mayReturnEmpty = False - self.keepTabs = False - self.ignoreExprs: List["ParserElement"] = list() - self.debug = False - self.streamlined = False - # optimize exception handling for subclasses that don't advance parse index - self.mayIndexError = True - self.errmsg = "" - # mark results names as modal (report only last) or cumulative (list all) - self.modalResults = True - # custom debug actions - self.debugActions = self.DebugActions(None, None, None) - # avoid redundant calls to preParse - self.callPreparse = True - self.callDuringTry = False - self.suppress_warnings_: List[Diagnostics] = [] - - def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement": - """ - Suppress warnings emitted for a particular diagnostic on this expression. - - Example:: - - base = pp.Forward() - base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) - - # statement would normally raise a warning, but is now suppressed - print(base.parseString("x")) - - """ - self.suppress_warnings_.append(warning_type) - return self - - def copy(self) -> "ParserElement": - """ - Make a copy of this :class:`ParserElement`. Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. 
- - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") - integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - - print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - """ - cpy = copy.copy(self) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - return cpy - - def set_results_name( - self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False - ) -> "ParserElement": - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - - Normally, results names are assigned as you would assign keys in a dict: - any existing value is overwritten by later values. If it is necessary to - keep all values captured for a particular results name, call ``set_results_name`` - with ``list_all_matches`` = True. - - NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.set_results_name("name")`` - - see :class:`__call__`. If ``list_all_matches`` is required, use - ``expr("name*")``. - - Example:: - - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - listAllMatches = listAllMatches or list_all_matches - return self._setResultsName(name, listAllMatches) - - def _setResultsName(self, name, listAllMatches=False): - if name is None: - return self - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches = True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def set_break(self, break_flag: bool = True) -> "ParserElement": - """ - Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to - disable. - """ - if break_flag: - _parseMethod = self._parse - - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - - # this call to pdb.set_trace() is intentional, not a checkin error - pdb.set_trace() - return _parseMethod(instring, loc, doActions, callPreParse) - - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Define one or more actions to perform when successfully matching parse element definition. - - Parse actions can be called to perform data conversions, do extra validation, - update external data structures, or enhance or replace the parsed tokens. 
- Each parse action ``fn`` is a callable method with 0-3 arguments, called as - ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object - - The parsed tokens are passed to the parse action as ParseResults. They can be - modified in place using list-style append, extend, and pop operations to update - the parsed list elements; and with dictionary-style item set and del operations - to add, update, or remove any named results. If the tokens are modified in place, - it is not necessary to return them with a return statement. - - Parse actions can also completely replace the given tokens, with another ``ParseResults`` - object, or with some entirely different object (common for parse actions that perform data - conversions). A convenient way to build a new parse result is to define the values - using a dict, and then create the return value using :class:`ParseResults.from_dict`. - - If None is passed as the ``fn`` parse action, all previously added parse actions for this - expression are cleared. - - Optional keyword arguments: - - - call_during_try = (default= ``False``) indicate if parse action should be run during - lookaheads and alternate testing. For parse actions that have side effects, it is - important to only call the parse action once it is determined that it is being - called as part of a successful parse. For parse actions that perform additional - validation, then call_during_try should be passed as True, so that the validation - code is included in the preliminary "try" parses. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`parse_string` for more - information on parsing strings containing ``<TAB>`` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - - Example:: - - # parse dates in the form YYYY/MM/DD - - # use parse action to convert toks from str to int at parse time - def convert_to_int(toks): - return int(toks[0]) - - # use a parse action to verify that the date is a valid date - def is_valid_date(instring, loc, toks): - from datetime import date - year, month, day = toks[::2] - try: - date(year, month, day) - except ValueError: - raise ParseException(instring, loc, "invalid date given") - - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - # add parse actions - integer.set_parse_action(convert_to_int) - date_str.set_parse_action(is_valid_date) - - # note that integer fields are now ints, not strings - date_str.run_tests(''' - # successful parse - note that integer fields were converted to ints - 1999/12/31 - - # fail - invalid date - 1999/13/31 - ''') - """ - if list(fns) == [None]: - self.parseAction = [] - else: - if not all(callable(fn) for fn in fns): - raise TypeError("parse actions must be callable") - self.parseAction = [_trim_arity(fn) for fn in fns] - self.callDuringTry = kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`. - - See examples in :class:`copy`. 
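A short sketch contrasting ``set_parse_action`` (which replaces any previously set actions) with ``add_parse_action`` (which appends to them); the grammar below is illustrative only::

    import pyparsing as pp

    integer = pp.Word(pp.nums)
    integer.set_parse_action(lambda t: int(t[0]))  # replaces prior actions
    integer.add_parse_action(lambda t: t[0] * 2)   # runs after the conversion

    print(integer.parse_string("21"))  # -> [42]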
- """ - self.parseAction += [_trim_arity(fn) for fn in fns] - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement": - """Add a boolean predicate function to expression's list of parse actions. See - :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, - functions passed to ``add_condition`` need to return boolean success/fail of the condition. - - Optional keyword arguments: - - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise - ParseException - - call_during_try = boolean to indicate if this method should be called during internal tryParse calls, - default=False - - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), - (line:1, col:1) - """ - for fn in fns: - self.parseAction.append( - condition_as_parse_action( - fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False) - ) - ) - - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def set_fail_action(self, fn: ParseFailAction) -> "ParserElement": - """ - Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - ``fn(s, loc, expr, err)`` where: - - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - - The function returns no value. 
It may throw :class:`ParseFatalException` - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables(self, instring, loc): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc, dummy = e._parse(instring, loc) - exprsFound = True - except ParseException: - pass - return loc - - def preParse(self, instring, loc): - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - - if self.skipWhitespace: - instrlen = len(instring) - white_chars = self.whiteChars - while loc < instrlen and instring[loc] in white_chars: - loc += 1 - - return loc - - def parseImpl(self, instring, loc, doActions=True): - return loc, [] - - def postParse(self, instring, loc, tokenlist): - return tokenlist - - # @profile - def _parseNoCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - TRY, MATCH, FAIL = 0, 1, 2 - debugging = self.debug # and doActions) - len_instring = len(instring) - - if debugging or self.failAction: - # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) - try: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.debugActions.debug_try: - self.debugActions.debug_try(instring, tokens_start, self, False) - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except Exception as err: - # print("Exception raised:", err) - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - if self.failAction: - self.failAction(instring, tokens_start, self, err) - raise - else: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - - tokens = self.postParse(instring, loc, tokens) - - ret_tokens = ParseResults( - tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults - ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - except Exception as err: - # print "Exception raised in user parse action:", err - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - raise - else: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = 
ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - if debugging: - # print("Matched", self, "->", ret_tokens.as_list()) - if self.debugActions.debug_match: - self.debugActions.debug_match( - instring, tokens_start, loc, self, ret_tokens, False - ) - - return loc, ret_tokens - - def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int: - try: - return self._parse(instring, loc, doActions=False)[0] - except ParseFatalException: - if raise_fatal: - raise - raise ParseException(instring, loc, self.errmsg, self) - - def can_parse_next(self, instring: str, loc: int) -> bool: - try: - self.try_parse(instring, loc) - except (ParseException, IndexError): - return False - else: - return True - - # cache for left-recursion in Forward references - recursion_lock = RLock() - recursion_memos: typing.Dict[ - Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] - ] = {} - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = ( - {} - ) # this is set later by enable_packrat(); this is here so that reset_cache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - HIT, MISS = 0, 1 - TRY, MATCH, FAIL = 0, 1, 2 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy(), loc)) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if self.debug and self.debugActions.debug_try: - try: - self.debugActions.debug_try(instring, loc, self, cache_hit=True) - except TypeError: - pass - if isinstance(value, Exception): - if self.debug and self.debugActions.debug_fail: - try: - self.debugActions.debug_fail( - instring, loc, self, value, cache_hit=True - ) - except TypeError: - pass - raise value - - loc_, result, endloc = value[0], value[1].copy(), value[2] - if self.debug and self.debugActions.debug_match: - try: - self.debugActions.debug_match( - instring, loc_, endloc, self, result, cache_hit=True - ) - except TypeError: - pass - - return loc_, result - - _parse = _parseNoCache - - @staticmethod - def reset_cache() -> None: - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len( - ParserElement.packrat_cache_stats - ) - ParserElement.recursion_memos.clear() - - _packratEnabled = False - _left_recursion_enabled = False - - @staticmethod - def disable_memoization() -> None: - """ - Disables active Packrat or Left Recursion parsing and their memoization - - This method also works if neither Packrat nor Left Recursion are enabled. - This makes it safe to call before activating Packrat or Left Recursion - to clear any previous settings. 
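A brief sketch of how ``disable_memoization`` is used to switch between the two memoization strategies (illustrative only; all three calls are the static methods defined here)::

    import pyparsing as pp

    pp.ParserElement.enable_packrat()         # packrat memoization on
    pp.ParserElement.disable_memoization()    # clear caches and return to plain parsing
    pp.ParserElement.enable_left_recursion()  # now safe: no conflicting packrat setting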
- """ - ParserElement.reset_cache() - ParserElement._left_recursion_enabled = False - ParserElement._packratEnabled = False - ParserElement._parse = ParserElement._parseNoCache - - @staticmethod - def enable_left_recursion( - cache_size_limit: typing.Optional[int] = None, *, force=False - ) -> None: - """ - Enables "bounded recursion" parsing, which allows for both direct and indirect - left-recursion. During parsing, left-recursive :class:`Forward` elements are - repeatedly matched with a fixed recursion depth that is gradually increased - until finding the longest match. - - Example:: - - import pyparsing as pp - pp.ParserElement.enable_left_recursion() - - E = pp.Forward("E") - num = pp.Word(pp.nums) - # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... - E <<= E + '+' - num | num - - print(E.parse_string("1+2+3")) - - Recursion search naturally memoizes matches of ``Forward`` elements and may - thus skip reevaluation of parse actions during backtracking. This may break - programs with parse actions which rely on strict ordering of side-effects. - - Parameters: - - - cache_size_limit - (default=``None``) - memoize at most this many - ``Forward`` elements during matching; if ``None`` (the default), - memoize all ``Forward`` elements. - - Bounded Recursion parsing works similar but not identical to Packrat parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. - """ - if force: - ParserElement.disable_memoization() - elif ParserElement._packratEnabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if cache_size_limit is None: - ParserElement.recursion_memos = _UnboundedMemo() - elif cache_size_limit > 0: - ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) - else: - raise NotImplementedError("Memo size of %s" % cache_size_limit) - ParserElement._left_recursion_enabled = True - - @staticmethod - def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: - """ - Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - Parameters: - - - cache_size_limit - (default= ``128``) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method :class:`ParserElement.enable_packrat`. - For best results, call ``enable_packrat()`` immediately after - importing pyparsing. - - Example:: - - import pyparsing - pyparsing.ParserElement.enable_packrat() - - Packrat parsing works similar but not identical to Bounded Recursion parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. 
- """ - if force: - ParserElement.disable_memoization() - elif ParserElement._left_recursion_enabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = _UnboundedCache() - else: - ParserElement.packrat_cache = _FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parse_string( - self, instring: str, parse_all: bool = False, *, parseAll: bool = False - ) -> ParseResults: - """ - Parse a string with respect to the parser definition. This function is intended as the primary interface to the - client code. - - :param instring: The input string to be parsed. - :param parse_all: If set, the entire input string must match the grammar. - :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. - :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. - :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or - an object with attributes if the given parser includes results names. - - If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This - is also equivalent to ending the grammar with :class:`StringEnd`(). - - To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are - converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string - contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string - being parsed, one can ensure a consistent view of the input string by doing one of the following: - - - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), - - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the - parse action's ``s`` argument, or - - explicitly expand the tabs in your input string before calling ``parse_string``. - - Examples: - - By default, partial matches are OK. - - >>> res = Word('a').parse_string('aaaaabaaa') - >>> print(res) - ['aaaaa'] - - The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children - directly to see more examples. - - It raises an exception if parse_all flag is set and instring does not match the whole grammar. - - >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) - Traceback (most recent call last): - ... 
- pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) - """ - parseAll = parse_all or parseAll - - ParserElement.reset_cache() - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse(instring, 0) - if parseAll: - loc = self.preParse(instring, loc) - se = Empty() + StringEnd() - se._parse(instring, loc) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - raise exc.with_traceback(None) - else: - return tokens - - def scan_string( - self, - instring: str, - max_matches: int = _MAX_INT, - overlap: bool = False, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> Generator[Tuple[ParseResults, int, int], None, None]: - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - ``max_matches`` argument, to clip scanning after 'n' matches are found. If - ``overlap`` is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See :class:`parse_string` for more information on parsing - strings with embedded tabs. - - Example:: - - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens, start, end in Word(alphas).scan_string(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - maxMatches = min(maxMatches, max_matches) - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = str(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn(instring, loc) - nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) - except ParseException: - loc = preloc + 1 - else: - if nextLoc > loc: - matches += 1 - if debug: - print( - { - "tokens": tokens.asList(), - "start": preloc, - "end": nextLoc, - } - ) - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn(instring, loc) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc + 1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def transform_string(self, instring: str, *, debug: bool = False) -> str: - """ - Extension to :class:`scan_string`, to modify matching text with modified tokens that may - be returned from a parse action. To use ``transform_string``, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking ``transform_string()`` on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. ``transform_string()`` returns the resulting transformed string. 
- - Example:: - - wd = Word(alphas) - wd.set_parse_action(lambda toks: toks[0].title()) - - print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) - - prints:: - - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out: List[str] = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transform_string and scan_string - self.keepTabs = True - try: - for t, s, e in self.scan_string(instring, debug=debug): - out.append(instring[lastE:s]) - if t: - if isinstance(t, ParseResults): - out += t.as_list() - elif isinstance(t, Iterable) and not isinstance(t, str_type): - out.extend(t) - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join([str(s) for s in _flatten(out)]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def search_string( - self, - instring: str, - max_matches: int = _MAX_INT, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> ParseResults: - """ - Another extension to :class:`scan_string`, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - ``max_matches`` argument, to clip searching after 'n' matches are found. - - Example:: - - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) - - # the sum() builtin can be used to merge results into a single ParseResults object - print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) - - prints:: - - [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] - ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] - """ - maxMatches = min(maxMatches, max_matches) - try: - return ParseResults( - [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] - ) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def split( - self, - instring: str, - maxsplit: int = _MAX_INT, - include_separators: bool = False, - *, - includeSeparators=False, - ) -> Generator[str, None, None]: - """ - Generator method to split a string using the given expression as a separator. - May be called with optional ``maxsplit`` argument, to limit the number of splits; - and the optional ``include_separators`` argument (default= ``False``), if the separating - matching text should be included in the split results. - - Example:: - - punc = one_of(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - - prints:: - - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - includeSeparators = includeSeparators or include_separators - last = 0 - for t, s, e in self.scan_string(instring, max_matches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator - returns :class:`And`. 
Adding strings to a :class:`ParserElement` - converts them to :class:`Literal`s by default. - - Example:: - - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print(hello, "->", greet.parse_string(hello)) - - prints:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - - ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. - - Literal('start') + ... + Literal('end') - - is equivalent to: - - Literal('start') + SkipTo('end')("_skipped*") + Literal('end') - - Note that the skipped text is returned with '_skipped' as a results name, - and to support having multiple skips in the same parser, the value returned is - a list of all skipped text. - """ - if other is Ellipsis: - return _PendingSkip(self) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return And([self, other]) - - def __radd__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator when left operand is not a :class:`ParserElement` - """ - if other is Ellipsis: - return SkipTo(self)("_skipped*") + self - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other + self - - def __sub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator, returns :class:`And` with error stop - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return self + And._ErrorStop() + other - - def __rsub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other - self - - def __mul__(self, other) -> "ParserElement": - """ - Implementation of ``*`` operator, allows use of ``expr * 3`` in place of - ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer - tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples - may also include ``None`` as in: - - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None, n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None, n)`` does not enforce a maximum number of expr - occurrences. 
If this behavior is desired, then write - ``expr*(None, n) + ~expr`` - """ - if other is Ellipsis: - other = (0, None) - elif isinstance(other, tuple) and other[:1] == (Ellipsis,): - other = ((0,) + other[1:] + (None,))[:2] - - if isinstance(other, int): - minElements, optElements = other, 0 - elif isinstance(other, tuple): - other = tuple(o if o is not Ellipsis else None for o in other) - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0], int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self * other[0] + ZeroOrMore(self) - elif isinstance(other[0], int) and isinstance(other[1], int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError( - "cannot multiply ParserElement and ({}) objects".format( - ",".join(type(item).__name__ for item in other) - ) - ) - else: - raise TypeError( - "cannot multiply ParserElement and {} objects".format( - type(other).__name__ - ) - ) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError( - "second tuple value must be greater or equal to first tuple value" - ) - if minElements == optElements == 0: - return And([]) - - if optElements: - - def makeOptionalList(n): - if n > 1: - return Opt(self + makeOptionalList(n - 1)) - else: - return Opt(self) - - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self] * minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self] * minElements) - return ret - - def __rmul__(self, other) -> "ParserElement": - return self.__mul__(other) - - def __or__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator - returns :class:`MatchFirst` - """ - if other is Ellipsis: - return _PendingSkip(self, must_skip=True) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return MatchFirst([self, other]) - - def __ror__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other | self - - def __xor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator - returns :class:`Or` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Or([self, other]) - - def __rxor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other ^ self - - def __and__(self, other) -> "ParserElement": - """ - 
Implementation of ``&`` operator - returns :class:`Each` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Each([self, other]) - - def __rand__(self, other) -> "ParserElement": - """ - Implementation of ``&`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other & self - - def __invert__(self) -> "ParserElement": - """ - Implementation of ``~`` operator - returns :class:`NotAny` - """ - return NotAny(self) - - # disable __iter__ to override legacy use of sequential access to __getitem__ to - # iterate over a sequence - __iter__ = None - - def __getitem__(self, key): - """ - use ``[]`` indexing notation as a short form for expression repetition: - - - ``expr[n]`` is equivalent to ``expr*n`` - - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` - - ``expr[n, ...]`` or ``expr[n,]`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` - - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` - - ``None`` may be used in place of ``...``. - - Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception - if more than ``n`` ``expr``s exist in the input stream. If this behavior is - desired, then write ``expr[..., n] + ~expr``. - """ - - # convert single arg keys to tuples - try: - if isinstance(key, str_type): - key = (key,) - iter(key) - except TypeError: - key = (key, key) - - if len(key) > 2: - raise TypeError( - "only 1 or 2 index arguments supported ({}{})".format( - key[:5], "... [{}]".format(len(key)) if len(key) > 5 else "" - ) - ) - - # clip to 2 elements - ret = self * tuple(key[:2]) - return ret - - def __call__(self, name: str = None) -> "ParserElement": - """ - Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be - passed as ``True``. - - If ``name`` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") - userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") - """ - if name is not None: - return self._setResultsName(name) - else: - return self.copy() - - def suppress(self) -> "ParserElement": - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress(self) - - def ignore_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Enables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. 
- - :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any) - """ - self.skipWhitespace = True - return self - - def leave_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Disables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - - :param recursive: If true (the default), also disable whitespace skipping in child elements (if any) - """ - self.skipWhitespace = False - return self - - def set_whitespace_chars( - self, chars: Union[Set[str], str], copy_defaults: bool = False - ) -> "ParserElement": - """ - Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = set(chars) - self.copyDefaultWhiteChars = copy_defaults - return self - - def parse_with_tabs(self) -> "ParserElement": - """ - Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string. - Must be called before ``parse_string`` when the input grammar contains elements that - match ``<TAB>`` characters. - """ - self.keepTabs = True - return self - - def ignore(self, other: "ParserElement") -> "ParserElement": - """ - Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - - Example:: - - patt = Word(alphas)[1, ...] - patt.parse_string('ablaj /* comment */ lskjd') - # -> ['ablaj'] - - patt.ignore(c_style_comment) - patt.parse_string('ablaj /* comment */ lskjd') - # -> ['ablaj', 'lskjd'] - """ - import typing - - if isinstance(other, str_type): - other = Suppress(other) - - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - self.ignoreExprs.append(other) - else: - self.ignoreExprs.append(Suppress(other.copy())) - return self - - def set_debug_actions( - self, - start_action: DebugStartAction, - success_action: DebugSuccessAction, - exception_action: DebugExceptionAction, - ) -> "ParserElement": - """ - Customize display of debugging messages while doing pattern matching: - - - ``start_action`` - method to be called when an expression is about to be parsed; - should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)`` - - - ``success_action`` - method to be called when an expression has successfully parsed; - should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)`` - - - ``exception_action`` - method to be called when expression fails to parse; - should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)`` - """ - self.debugActions = self.DebugActions( - start_action or _default_start_debug_action, - success_action or _default_success_debug_action, - exception_action or _default_exception_debug_action, - ) - self.debug = True - return self - - def set_debug(self, flag: bool = True) -> "ParserElement": - """ - Enable display of debugging messages while doing pattern matching. - Set ``flag`` to ``True`` to enable, ``False`` to disable. 
- - Example:: - - wd = Word(alphas).set_name("alphaword") - integer = Word(nums).set_name("numword") - term = wd | integer - - # turn on debugging for wd - wd.set_debug() - - term[1, ...].parse_string("abc 123 xyz 890") - - prints:: - - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using :class:`set_debug_actions`. Prior to attempting - to match the ``wd`` expression, the debugging message ``"Match <expr> at loc <n>(<line>,<col>)"`` - is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` - message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``. - """ - if flag: - self.set_debug_actions( - _default_start_debug_action, - _default_success_debug_action, - _default_exception_debug_action, - ) - else: - self.debug = False - return self - - @property - def default_name(self) -> str: - if self._defaultName is None: - self._defaultName = self._generateDefaultName() - return self._defaultName - - @abstractmethod - def _generateDefaultName(self): - """ - Child classes must define this method, which defines how the ``default_name`` is set. - """ - - def set_name(self, name: str) -> "ParserElement": - """ - Define name for this expression, makes debugging and exception messages clearer. - Example:: - Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) - Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.customName = name - self.errmsg = "Expected " + self.name - if __diag__.enable_debug_on_named_expressions: - self.set_debug() - return self - - @property - def name(self) -> str: - # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name - return self.customName if self.customName is not None else self.default_name - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return str(self) - - def streamline(self) -> "ParserElement": - self.streamlined = True - self._defaultName = None - return self - - def recurse(self) -> Sequence["ParserElement"]: - return [] - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.recurse(): - e._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - """ - Check defined expressions for valid structure, check for infinite recursive definitions. - """ - self._checkRecursion([]) - - def parse_file( - self, - file_or_filename: Union[str, Path, TextIO], - encoding: str = "utf-8", - parse_all: bool = False, - *, - parseAll: bool = False, - ) -> ParseResults: - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. 
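``parse_file`` carries no example of its own, so a minimal sketch (the grammar and file name are hypothetical)::

    import pyparsing as pp

    key_value = pp.Word(pp.alphas)("key") + pp.Suppress("=") + pp.Word(pp.alphanums)("value")

    # pass a filename; the file is opened, read, and closed internally
    result = key_value.parse_file("settings.txt", encoding="utf-8")

    # or pass an already-open file-like object
    with open("settings.txt", encoding="utf-8") as f:
        result = key_value.parse_file(f)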
- """ - parseAll = parseAll or parse_all - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r", encoding=encoding) as f: - file_contents = f.read() - try: - return self.parse_string(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def __eq__(self, other): - if self is other: - return True - elif isinstance(other, str_type): - return self.matches(other, parse_all=True) - elif isinstance(other, ParserElement): - return vars(self) == vars(other) - return False - - def __hash__(self): - return id(self) - - def matches( - self, test_string: str, parse_all: bool = True, *, parseAll: bool = True - ) -> bool: - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - ``test_string`` - to test against this expression for a match - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - parseAll = parseAll and parse_all - try: - self.parse_string(str(test_string), parse_all=parseAll) - return True - except ParseBaseException: - return False - - def run_tests( - self, - tests: Union[str, List[str]], - parse_all: bool = True, - comment: typing.Optional[Union["ParserElement", str]] = "#", - full_dump: bool = True, - print_results: bool = True, - failure_tests: bool = False, - post_parse: Callable[[str, ParseResults], str] = None, - file: typing.Optional[TextIO] = None, - with_line_numbers: bool = False, - *, - parseAll: bool = True, - fullDump: bool = True, - printResults: bool = True, - failureTests: bool = False, - postParse: Callable[[str, ParseResults], str] = None, - ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]: - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. 
- - Parameters: - - ``tests`` - a list of separate test strings, or a multiline string of test strings - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - ``print_results`` - (default= ``True``) prints test output to stdout - - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing - - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - ``file`` - (default= ``None``) optional file-like object to which test output will be written; - if None, will default to ``sys.stdout`` - - ``with_line_numbers`` - default= ``False``) show test strings with line and column numbers - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failure_tests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.run_tests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.run_tests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failure_tests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading ``'r'``.) 
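As a supplement to the examples above, a small sketch of the ``post_parse`` callback (the callback name is illustrative)::

    import pyparsing as pp

    integer = pp.Word(pp.nums)

    def summarize(test_string, result):
        # the returned string is appended to that test's printed output
        return "-> parsed {!r}".format(result.as_list())

    integer.run_tests('''
        100
        8675309
        ''', post_parse=summarize)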
- """ - from .testing import pyparsing_test - - parseAll = parseAll and parse_all - fullDump = fullDump and full_dump - printResults = printResults and print_results - failureTests = failureTests or failure_tests - postParse = postParse or post_parse - if isinstance(tests, str_type): - line_strip = type(tests).strip - tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()] - if isinstance(comment, str_type): - comment = Literal(comment) - if file is None: - file = sys.stdout - print_ = file.write - - result: Union[ParseResults, Exception] - allResults = [] - comments = [] - success = True - NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) - BOM = "\ufeff" - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append( - pyparsing_test.with_line_numbers(t) if with_line_numbers else t - ) - continue - if not t: - continue - out = [ - "\n" + "\n".join(comments) if comments else "", - pyparsing_test.with_line_numbers(t) if with_line_numbers else t, - ] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = NL.transform_string(t.lstrip(BOM)) - result = self.parse_string(t, parse_all=parseAll) - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - out.append(pe.explain()) - out.append("FAIL: " + str(pe)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(pe.__traceback__)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(exc.__traceback__)) - success = success and failureTests - result = exc - else: - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - if isinstance(pp_value, ParseResults): - out.append(pp_value.dump()) - else: - out.append(str(pp_value)) - else: - out.append(result.dump()) - except Exception as e: - out.append(result.dump(full=fullDump)) - out.append( - "{} failed: {}: {}".format( - postParse.__name__, type(e).__name__, e - ) - ) - else: - out.append(result.dump(full=fullDump)) - out.append("") - - if printResults: - print_("\n".join(out)) - - allResults.append((t, result)) - - return success, allResults - - def create_diagram( - self, - output_html: Union[TextIO, Path, str], - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, - **kwargs, - ) -> None: - """ - Create a railroad diagram for the parser. - - Parameters: - - output_html (str or file-like object) - output target for generated - diagram HTML - - vertical (int) - threshold for formatting multiple alternatives vertically - instead of horizontally (default=3) - - show_results_names - bool flag whether diagram should show annotations for - defined results names - - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box - Additional diagram-formatting keyword arguments can also be included; - see railroad.Diagram class. 
- """ - - try: - from .diagram import to_railroad, railroad_to_html - except ImportError as ie: - raise Exception( - "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams" - ) from ie - - self.streamline() - - railroad = to_railroad( - self, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - diagram_kwargs=kwargs, - ) - if isinstance(output_html, (str, Path)): - with open(output_html, "w", encoding="utf-8") as diag_file: - diag_file.write(railroad_to_html(railroad)) - else: - # we were passed a file-like object, just write to it - output_html.write(railroad_to_html(railroad)) - - setDefaultWhitespaceChars = set_default_whitespace_chars - inlineLiteralsUsing = inline_literals_using - setResultsName = set_results_name - setBreak = set_break - setParseAction = set_parse_action - addParseAction = add_parse_action - addCondition = add_condition - setFailAction = set_fail_action - tryParse = try_parse - canParseNext = can_parse_next - resetCache = reset_cache - enableLeftRecursion = enable_left_recursion - enablePackrat = enable_packrat - parseString = parse_string - scanString = scan_string - searchString = search_string - transformString = transform_string - setWhitespaceChars = set_whitespace_chars - parseWithTabs = parse_with_tabs - setDebugActions = set_debug_actions - setDebug = set_debug - defaultName = default_name - setName = set_name - parseFile = parse_file - runTests = run_tests - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class _PendingSkip(ParserElement): - # internal placeholder class to hold a place were '...' is added to a parser element, - # once another ParserElement is added, this placeholder will be replaced with a SkipTo - def __init__(self, expr: ParserElement, must_skip: bool = False): - super().__init__() - self.anchor = expr - self.must_skip = must_skip - - def _generateDefaultName(self): - return str(self.anchor + Empty()).replace("Empty", "...") - - def __add__(self, other) -> "ParserElement": - skipper = SkipTo(other).set_name("...")("_skipped*") - if self.must_skip: - - def must_skip(t): - if not t._skipped or t._skipped.as_list() == [""]: - del t[0] - t.pop("_skipped", None) - - def show_skip(t): - if t._skipped.as_list()[-1:] == [""]: - t.pop("_skipped") - t["_skipped"] = "missing <" + repr(self.anchor) + ">" - - return ( - self.anchor + skipper().add_parse_action(must_skip) - | skipper().add_parse_action(show_skip) - ) + other - - return self.anchor + skipper + other - - def __repr__(self): - return self.defaultName - - def parseImpl(self, *args): - raise Exception( - "use of `...` expression without following SkipTo target expression" - ) - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - - def __init__(self): - super().__init__(savelist=False) - - def _generateDefaultName(self): - return type(self).__name__ - - -class Empty(Token): - """ - An empty token, will always match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """ - A token that will never match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl(self, instring, loc, doActions=True): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """ - Token to exactly match a specified string. 
- - Example:: - - Literal('blah').parse_string('blah') # -> ['blah'] - Literal('blah').parse_string('blahfooblah') # -> ['blah'] - Literal('blah').parse_string('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - super().__init__() - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Literal; use Empty() instead") - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: modify __class__ to select - # a parseImpl optimized for single-character check - if self.matchLen == 1 and type(self) is Literal: - self.__class__ = _SingleCharLiteral - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar and instring.startswith( - self.match, loc - ): - return loc + self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -class _SingleCharLiteral(Literal): - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar: - return loc + 1, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -ParserElement._literalStringClass = Literal - - -class Keyword(Token): - """ - Token to exactly match a specified string as a keyword, that is, - it must be immediately followed by a non-keyword character. Compare - with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``identChars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parse_string("start") # -> ['start'] - Keyword("start").parse_string("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. 
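To make the contrast with :class:`Literal` concrete, a quick sketch::

    import pyparsing as pp

    print(pp.Literal("if").search_string("ifdef if(x)"))  # -> [['if'], ['if']]
    print(pp.Keyword("if").search_string("ifdef if(x)"))  # -> [['if']]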
- """ - - DEFAULT_KEYWORD_CHARS = alphanums + "_$" - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - caseless: bool = False, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - super().__init__() - identChars = identChars or ident_chars - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Keyword; use Empty() instead") - self.errmsg = "Expected {} {}".format(type(self).__name__, self.name) - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = match_string.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - errmsg = self.errmsg - errloc = loc - if self.caseless: - if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: - if loc == 0 or instring[loc - 1].upper() not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen].upper() not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ", was immediately followed by keyword character" - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - else: - if ( - instring[loc] == self.firstMatchChar - and self.matchLen == 1 - or instring.startswith(self.match, loc) - ): - if loc == 0 or instring[loc - 1] not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ( - ", keyword was immediately followed by keyword character" - ) - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - raise ParseException(instring, errloc, errmsg, self) - - @staticmethod - def set_default_keyword_chars(chars) -> None: - """ - Overrides the default characters used by :class:`Keyword` expressions. - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - - setDefaultKeywordChars = set_default_keyword_chars - - -class CaselessLiteral(Literal): - """ - Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - - CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - match_string = matchString or match_string - super().__init__(match_string.upper()) - # Preserve the defining literal. 
- self.returnString = match_string - self.errmsg = "Expected " + self.name - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc : loc + self.matchLen].upper() == self.match: - return loc + self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - identChars = identChars or ident_chars - match_string = matchString or match_string - super().__init__(match_string, identChars, caseless=True) - - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. - :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters - - ``max_mismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. - - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) - patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - - def __init__( - self, - match_string: str, - max_mismatches: int = None, - *, - maxMismatches: int = 1, - caseless=False, - ): - maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches - super().__init__() - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected {!r} (with up to {} mismatches)".format( - self.match_string, self.maxMismatches - ) - self.caseless = caseless - self.mayIndexError = False - self.mayReturnEmpty = False - - def _generateDefaultName(self): - return "{}:{!r}".format(type(self).__name__, self.match_string) - - def parseImpl(self, instring, loc, doActions=True): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc, s_m in enumerate( - zip(instring[loc:maxloc], match_string) - ): - src, mat = s_m - if self.caseless: - src, mat = src.lower(), mat.lower() - - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = start + match_stringloc + 1 - 
results = ParseResults([instring[start:loc]]) - results["original"] = match_string - results["mismatches"] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """Token for matching words composed of allowed character sets. - Parameters: - - ``init_chars`` - string of all characters that should be used to - match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; - if ``body_chars`` is also specified, then this is the string of - initial characters - - ``body_chars`` - string of characters that - can be used for matching after a matched initial character as - given in ``init_chars``; if omitted, same as the initial characters - (default=``None``) - - ``min`` - minimum number of characters to match (default=1) - - ``max`` - maximum number of characters to match (default=0) - - ``exact`` - exact number of characters to match (default=0) - - ``as_keyword`` - match as a keyword (default=``False``) - - ``exclude_chars`` - characters that might be - found in the input ``body_chars`` string but which should not be - accepted for matching ;useful to define a word of all - printables except for one or two characters, for instance - (default=``None``) - - :class:`srange` is useful for defining custom character set strings - for defining :class:`Word` expressions, using range notation from - regular expression character sets. - - A common mistake is to use :class:`Word` to match a specific literal - string, as in ``Word("Address")``. Remember that :class:`Word` - uses the string argument to define *sets* of matchable characters. - This expression would match "Add", "AAA", "dAred", or any other word - made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an - exact literal string, use :class:`Literal` or :class:`Keyword`. - - pyparsing includes helper strings for building Words: - - - :class:`alphas` - - :class:`nums` - - :class:`alphanums` - - :class:`hexnums` - - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 - - accented, tilded, umlauted, etc.) - - :class:`punc8bit` (non-alphabetic characters in ASCII range - 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - :class:`printables` (any non-whitespace character) - - ``alphas``, ``nums``, and ``printables`` are also defined in several - Unicode sets - see :class:`pyparsing_unicode``. 
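As a quick illustration of the :class:`srange` note above (a supplementary sketch, not part of the original examples)::

    import pyparsing as pp

    # srange expands a regex-style character class into a plain string
    hex_word = pp.Word(pp.srange("[0-9a-fA-F]"))
    print(hex_word.parse_string("DEADbeef"))  # -> ['DEADbeef']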
- - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums + '-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, exclude_chars=",") - """ - - def __init__( - self, - init_chars: str = "", - body_chars: typing.Optional[str] = None, - min: int = 1, - max: int = 0, - exact: int = 0, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - initChars: typing.Optional[str] = None, - bodyChars: typing.Optional[str] = None, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - initChars = initChars or init_chars - bodyChars = bodyChars or body_chars - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__() - if not initChars: - raise ValueError( - "invalid {}, initChars cannot be empty string".format( - type(self).__name__ - ) - ) - - initChars = set(initChars) - self.initChars = initChars - if excludeChars: - excludeChars = set(excludeChars) - initChars -= excludeChars - if bodyChars: - bodyChars = set(bodyChars) - excludeChars - self.initCharsOrig = "".join(sorted(initChars)) - - if bodyChars: - self.bodyCharsOrig = "".join(sorted(bodyChars)) - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = "".join(sorted(initChars)) - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - # see if we can make a regex for this Word - if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0): - if self.bodyChars == self.initChars: - if max == 0: - repeat = "+" - elif max == 1: - repeat = "" - else: - repeat = "{{{},{}}}".format( - self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen - ) - self.reString = "[{}]{}".format( - _collapse_string_to_ranges(self.initChars), - repeat, - ) - elif len(self.initChars) == 1: - if max == 0: - repeat = "*" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "{}[{}]{}".format( - re.escape(self.initCharsOrig), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - else: - if max == 0: - repeat = "*" - elif max == 2: - repeat = "" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "[{}][{}]{}".format( - _collapse_string_to_ranges(self.initChars), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - if self.asKeyword: - self.reString = r"\b" + self.reString + r"\b" - - try: - self.re = re.compile(self.reString) - except re.error: - self.re = None - else: - self.re_match = self.re.match - self.__class__ = _WordRegex - - def _generateDefaultName(self): - def charsAsStr(s): - max_repr_len = 16 - s = _collapse_string_to_ranges(s, re_escape=False) - if len(s) > max_repr_len: - return s[: max_repr_len - 3] + "..." 
- else: - return s - - if self.initChars != self.bodyChars: - base = "W:({}, {})".format( - charsAsStr(self.initChars), charsAsStr(self.bodyChars) - ) - else: - base = "W:({})".format(charsAsStr(self.initChars)) - - # add length specification - if self.minLen > 1 or self.maxLen != _MAX_INT: - if self.minLen == self.maxLen: - if self.minLen == 1: - return base[2:] - else: - return base + "{{{}}}".format(self.minLen) - elif self.maxLen == _MAX_INT: - return base + "{{{},...}}".format(self.minLen) - else: - return base + "{{{},{}}}".format(self.minLen, self.maxLen) - return base - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.initChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min(maxloc, instrlen) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - elif self.asKeyword: - if ( - start > 0 - and instring[start - 1] in bodychars - or loc < instrlen - and instring[loc] in bodychars - ): - throwException = True - - if throwException: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _WordRegex(Word): - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - -class Char(_WordRegex): - """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, - when defining a match of any single character in a string of - characters. - """ - - def __init__( - self, - charset: str, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__( - charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars - ) - self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars)) - if asKeyword: - self.reString = r"\b{}\b".format(self.reString) - self.re = re.compile(self.reString) - self.re_match = self.re.match - - -class Regex(Token): - r"""Token for matching strings that match a given regular - expression. Defined with string specifying the regular expression in - a form recognized by the stdlib Python `re module `_. - If the given regex contains named groups (defined using ``(?P...)``), - these will be preserved as named :class:`ParseResults`. - - If instead of the Python stdlib ``re`` module you wish to use a different RE module - (such as the ``regex`` module), you can do so by building your ``Regex`` object with - a compiled RE that was compiled using ``regex``. 
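In addition to the examples below, the ``as_group_list`` flag returns the raw regex groups as a tuple - a brief sketch::

    import pyparsing as pp

    date = pp.Regex(r"(\d{4})-(\d{2})-(\d{2})", as_group_list=True)
    print(date.parse_string("2023-07-14"))  # -> [('2023', '07', '14')]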
- - Example:: - - realnum = Regex(r"[+-]?\d+\.\d*") - # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - - # named fields in a regex will be returned as named results - date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') - - # the Regex class will accept re's compiled using the regex module - import regex - parser = pp.Regex(regex.compile(r'[0-9]')) - """ - - def __init__( - self, - pattern: Any, - flags: Union[re.RegexFlag, int] = 0, - as_group_list: bool = False, - as_match: bool = False, - *, - asGroupList: bool = False, - asMatch: bool = False, - ): - """The parameters ``pattern`` and ``flags`` are passed - to the ``re.compile()`` function as-is. See the Python - `re module `_ module for an - explanation of the acceptable patterns and flags. - """ - super().__init__() - asGroupList = asGroupList or as_group_list - asMatch = asMatch or as_match - - if isinstance(pattern, str_type): - if not pattern: - raise ValueError("null string passed to Regex; use Empty() instead") - - self._re = None - self.reString = self.pattern = pattern - self.flags = flags - - elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): - self._re = pattern - self.pattern = self.reString = pattern.pattern - self.flags = flags - - else: - raise TypeError( - "Regex may only be constructed with a string or a compiled RE object" - ) - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asGroupList = asGroupList - self.asMatch = asMatch - if self.asGroupList: - self.parseImpl = self.parseImplAsGroupList - if self.asMatch: - self.parseImpl = self.parseImplAsMatch - - @cached_property - def re(self): - if self._re: - return self._re - else: - try: - return re.compile(self.pattern, self.flags) - except re.error: - raise ValueError( - "invalid pattern ({!r}) passed to Regex".format(self.pattern) - ) - - @cached_property - def re_match(self): - return self.re.match - - @cached_property - def mayReturnEmpty(self): - return self.re_match("") is not None - - def _generateDefaultName(self): - return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\")) - - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc, ret - - def parseImplAsGroupList(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.groups() - return loc, ret - - def parseImplAsMatch(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result - return loc, ret - - def sub(self, repl: str) -> ParserElement: - r""" - Return :class:`Regex` with an attached parse action to transform the parsed - result as if called using `re.sub(expr, repl, string) `_. - - Example:: - - make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") - print(make_html.transform_string("h1:main title:")) - # prints "

<h1>main title</h1>

" - """ - if self.asGroupList: - raise TypeError("cannot use sub() with Regex(asGroupList=True)") - - if self.asMatch and callable(repl): - raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)") - - if self.asMatch: - - def pa(tokens): - return tokens[0].expand(repl) - - else: - - def pa(tokens): - return self.re.sub(repl, tokens[0]) - - return self.add_parse_action(pa) - - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. - - Defined with the following parameters: - - - ``quote_char`` - string of one or more characters defining the - quote delimiting string - - ``esc_char`` - character to re_escape quotes, typically backslash - (default= ``None``) - - ``esc_quote`` - special quote sequence to re_escape an embedded quote - string (such as SQL's ``""`` to re_escape an embedded ``"``) - (default= ``None``) - - ``multiline`` - boolean indicating whether quotes can span - multiple lines (default= ``False``) - - ``unquote_results`` - boolean indicating whether the matched text - should be unquoted (default= ``True``) - - ``end_quote_char`` - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quote_char) - - ``convert_whitespace_escapes`` - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True``) - - Example:: - - qs = QuotedString('"') - print(qs.search_string('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', end_quote_char='}}') - print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', esc_quote='""') - print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")) - - def __init__( - self, - quote_char: str = "", - esc_char: typing.Optional[str] = None, - esc_quote: typing.Optional[str] = None, - multiline: bool = False, - unquote_results: bool = True, - end_quote_char: typing.Optional[str] = None, - convert_whitespace_escapes: bool = True, - *, - quoteChar: str = "", - escChar: typing.Optional[str] = None, - escQuote: typing.Optional[str] = None, - unquoteResults: bool = True, - endQuoteChar: typing.Optional[str] = None, - convertWhitespaceEscapes: bool = True, - ): - super().__init__() - escChar = escChar or esc_char - escQuote = escQuote or esc_quote - unquoteResults = unquoteResults and unquote_results - endQuoteChar = endQuoteChar or end_quote_char - convertWhitespaceEscapes = ( - convertWhitespaceEscapes and convert_whitespace_escapes - ) - quote_char = quoteChar or quote_char - - # remove white space from quote chars - wont work anyway - quote_char = quote_char.strip() - if not quote_char: - raise ValueError("quote_char cannot be the empty string") - - if endQuoteChar is None: - endQuoteChar = quote_char - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - raise ValueError("endQuoteChar cannot be the empty string") - - self.quoteChar = quote_char - self.quoteCharLen = len(quote_char) - self.firstQuoteChar = quote_char[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - sep = "" - inner_pattern = "" - - if escQuote: - 
inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote)) - sep = "|" - - if escChar: - inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar)) - sep = "|" - self.escCharReplacePattern = re.escape(self.escChar) + "(.)" - - if len(self.endQuoteChar) > 1: - inner_pattern += ( - "{}(?:".format(sep) - + "|".join( - "(?:{}(?!{}))".format( - re.escape(self.endQuoteChar[:i]), - re.escape(self.endQuoteChar[i:]), - ) - for i in range(len(self.endQuoteChar) - 1, 0, -1) - ) - + ")" - ) - sep = "|" - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - inner_pattern += r"{}(?:[^{}{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - else: - self.flags = 0 - inner_pattern += r"{}(?:[^{}\n\r{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - - self.pattern = "".join( - [ - re.escape(self.quoteChar), - "(?:", - inner_pattern, - ")*", - re.escape(self.endQuoteChar), - ] - ) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - self.re_match = self.re.match - except re.error: - raise ValueError( - "invalid pattern {!r} passed to Regex".format(self.pattern) - ) - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def _generateDefaultName(self): - if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type): - return "string enclosed in {!r}".format(self.quoteChar) - - return "quoted string, starting with {} ending with {}".format( - self.quoteChar, self.endQuoteChar - ) - - def parseImpl(self, instring, loc, doActions=True): - result = ( - instring[loc] == self.firstQuoteChar - and self.re_match(instring, loc) - or None - ) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen : -self.endQuoteCharLen] - - if isinstance(ret, str_type): - # replace escaped whitespace - if "\\" in ret and self.convertWhitespaceEscapes: - for wslit, wschar in self.ws_map: - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. 
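Supplementing the example below, the length parameters behave as in :class:`Word` - a small sketch::

    import pyparsing as pp

    # require at least two consecutive non-digit characters
    not_digits = pp.CharsNotIn(pp.nums, min=2)
    print(not_digits.parse_string("abc123"))  # -> ['abc']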
- - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - - def __init__( - self, - not_chars: str = "", - min: int = 1, - max: int = 0, - exact: int = 0, - *, - notChars: str = "", - ): - super().__init__() - self.skipWhitespace = False - self.notChars = not_chars or notChars - self.notCharsSet = set(self.notChars) - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use " - "Opt(CharsNotIn()) if zero-length char group is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = self.minLen == 0 - self.mayIndexError = False - - def _generateDefaultName(self): - not_chars_str = _collapse_string_to_ranges(self.notChars) - if len(not_chars_str) > 16: - return "!W:({}...)".format(self.notChars[: 16 - 3]) - else: - return "!W:({})".format(self.notChars) - - def parseImpl(self, instring, loc, doActions=True): - notchars = self.notCharsSet - if instring[loc] in notchars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - maxlen = min(start + self.maxLen, len(instring)) - while loc < maxlen and instring[loc] not in notchars: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. 
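For instance, to make tab indentation significant (an illustrative sketch)::

    import pyparsing as pp

    tab = pp.White("\t")
    line = tab + pp.Word(pp.alphas)
    print(line.parse_string("\tindented"))  # -> ['\t', 'indented']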
- """ - - whiteStrs = { - " ": "", - "\t": "", - "\n": "", - "\r": "", - "\f": "", - "\u00A0": "", - "\u1680": "", - "\u180E": "", - "\u2000": "", - "\u2001": "", - "\u2002": "", - "\u2003": "", - "\u2004": "", - "\u2005": "", - "\u2006": "", - "\u2007": "", - "\u2008": "", - "\u2009": "", - "\u200A": "", - "\u200B": "", - "\u202F": "", - "\u205F": "", - "\u3000": "", - } - - def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0): - super().__init__() - self.matchWhite = ws - self.set_whitespace_chars( - "".join(c for c in self.whiteStrs if c not in self.matchWhite), - copy_defaults=True, - ) - # self.leave_whitespace() - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def _generateDefaultName(self): - return "".join(White.whiteStrs[c] for c in self.matchWhite) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.matchWhite: - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min(maxloc, len(instring)) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class PositionToken(Token): - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class GoToColumn(PositionToken): - """Token to advance to a specific column of input text; useful for - tabular report scraping. - """ - - def __init__(self, colno: int): - super().__init__() - self.col = colno - - def preParse(self, instring, loc): - if col(loc, instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - while ( - loc < instrlen - and instring[loc].isspace() - and col(loc, instring) != self.col - ): - loc += 1 - return loc - - def parseImpl(self, instring, loc, doActions=True): - thiscol = col(loc, instring) - if thiscol > self.col: - raise ParseException(instring, loc, "Text not in expected column", self) - newloc = loc + self.col - thiscol - ret = instring[loc:newloc] - return newloc, ret - - -class LineStart(PositionToken): - r"""Matches if current position is at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self): - super().__init__() - self.leave_whitespace() - self.orig_whiteChars = set() | self.whiteChars - self.whiteChars.discard("\n") - self.skipper = Empty().set_whitespace_chars(self.whiteChars) - self.errmsg = "Expected start of line" - - def preParse(self, instring, loc): - if loc == 0: - return loc - else: - ret = self.skipper.preParse(instring, loc) - if "\n" in self.orig_whiteChars: - while instring[ret : ret + 1] == "\n": - ret = self.skipper.preParse(instring, ret + 1) - return ret - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - - -class LineEnd(PositionToken): - """Matches if current position is at the end of a line within the - parse string - """ - - 
def __init__(self): - super().__init__() - self.whiteChars.discard("\n") - self.set_whitespace_chars(self.whiteChars, copy_defaults=False) - self.errmsg = "Expected end of line" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - if instring[loc] == "\n": - return loc + 1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class StringStart(PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected start of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class StringEnd(PositionToken): - """ - Matches if current position is at the end of the parse string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected end of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class WordStart(PositionToken): - """Matches if the current position is at the beginning of a - :class:`Word`, and is not preceded by any character in a given - set of ``word_chars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - if ( - instring[loc - 1] in self.wordChars - or instring[loc] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class WordEnd(PositionToken): - """Matches if the current position is at the end of a :class:`Word`, - and is not followed by any character in a given set of ``word_chars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True): - instrlen = len(instring) - if instrlen > 0 and loc < instrlen: - if ( - instring[loc] in self.wordChars - or instring[loc - 1] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. 
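The concrete subclasses that follow are usually built with operators rather than constructed directly - a quick sketch::

    import pyparsing as pp

    w, n = pp.Word(pp.alphas), pp.Word(pp.nums)
    print(type(w + n).__name__)  # -> And
    print(type(w | n).__name__)  # -> MatchFirst
    print(type(w ^ n).__name__)  # -> Or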
- """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(savelist) - self.exprs: List[ParserElement] - if isinstance(exprs, _generatorType): - exprs = list(exprs) - - if isinstance(exprs, str_type): - self.exprs = [self._literalStringClass(exprs)] - elif isinstance(exprs, ParserElement): - self.exprs = [exprs] - elif isinstance(exprs, Iterable): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if any(isinstance(expr, str_type) for expr in exprs): - exprs = ( - self._literalStringClass(e) if isinstance(e, str_type) else e - for e in exprs - ) - self.exprs = list(exprs) - else: - try: - self.exprs = list(exprs) - except TypeError: - self.exprs = [exprs] - self.callPreparse = False - - def recurse(self) -> Sequence[ParserElement]: - return self.exprs[:] - - def append(self, other) -> ParserElement: - self.exprs.append(other) - self._defaultName = None - return self - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().leave_whitespace(recursive) - - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().ignore_whitespace(recursive) - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - return self - - def _generateDefaultName(self): - return "{}:({})".format(self.__class__.__name__, str(self.exprs)) - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) - if len(self.exprs) == 2: - other = self.exprs[0] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = other.exprs[:] + [self.exprs[1]] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = self.exprs[:-1] + other.exprs[:] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + str(self) - - return self - - def validate(self, validateTrace=None) -> None: - tmp = (validateTrace if validateTrace is not None else [])[:] + [self] - for e in self.exprs: - e.validate(tmp) - self._checkRecursion([]) - - def copy(self) -> ParserElement: - ret = super().copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - - 
def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in self.exprs: - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class And(ParseExpression): - """ - Requires all given :class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = Word(alphas)[1, ...] - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.leave_whitespace() - - def _generateDefaultName(self): - return "-" - - def __init__( - self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True - ): - exprs: List[ParserElement] = list(exprs_arg) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception( - "cannot construct And with sequence ending in ..." 
- ) - else: - tmp.append(expr) - exprs[:] = tmp - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - if not isinstance(self.exprs[0], White): - self.set_whitespace_chars( - self.exprs[0].whiteChars, - copy_defaults=self.exprs[0].copyDefaultWhiteChars, - ) - self.skipWhitespace = self.exprs[0].skipWhitespace - else: - self.skipWhitespace = False - else: - self.mayReturnEmpty = True - self.callPreparse = True - - def streamline(self) -> ParserElement: - # collapse any _PendingSkip's - if self.exprs: - if any( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1] - ): - for i, e in enumerate(self.exprs[:-1]): - if e is None: - continue - if ( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - ): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = None - self.exprs = [e for e in self.exprs if e is not None] - - super().streamline() - - # link any IndentedBlocks to the prior expression - for prev, cur in zip(self.exprs, self.exprs[1:]): - # traverse cur or any first embedded expr of cur looking for an IndentedBlock - # (but watch out for recursive grammar) - seen = set() - while cur: - if id(cur) in seen: - break - seen.add(id(cur)) - if isinstance(cur, IndentedBlock): - prev.add_parse_action( - lambda s, l, t, cur_=cur: setattr( - cur_, "parent_anchor", col(l, s) - ) - ) - break - subs = cur.recurse() - cur = next(iter(subs), None) - - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as callPreParse arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( - instring, loc, doActions, callPreParse=False - ) - errorStop = False - for e in self.exprs[1:]: - # if isinstance(e, And._ErrorStop): - if type(e) is And._ErrorStop: - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException( - instring, len(instring), self.errmsg, self - ) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # And([self, other]) - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e._checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def _generateDefaultName(self): - inner = " ".join(str(e) for e in self.exprs) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "{" + inner + "}" - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. - - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) - print(number.search_string("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - fatals = [] - if all(e.callPreparse for e in self.exprs): - loc = self.preParse(instring, loc) - for e in self.exprs: - try: - loc2 = e.try_parse(instring, loc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - maxException = None - maxExcLoc = -1 - except ParseException as err: - if not fatals: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. 
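(A quick illustration of the longest-match selection that this sorting enables; a minimal sketch with invented literals, not part of the original source:)

    from pyparsing import Literal

    # Or ('^') evaluates all alternatives and keeps the longest match,
    # so the one-character '<' cannot shadow the two-character '<='
    op = Literal("<") ^ Literal("<=")
    print(op.parse_string("<="))        # -> ['<=']

    # MatchFirst ('|') would instead return the first alternative to succeed
    op_first = Literal("<") | Literal("<=")
    print(op_first.parse_string("<="))  # -> ['<']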
- matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ixor__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # Or([self, other]) - - def _generateDefaultName(self): - return "{" + " ^ ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - more than one expression matches, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) - print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - if self.exprs: - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - - for e in self.exprs: - try: - return e._parse( - instring, - loc, - doActions, - ) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - raise - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ior__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # MatchFirst([self, other]) - - def _generateDefaultName(self): - return "{" + " | ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. 
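(Before the Each example below, a short sketch of the in-place |= operator defined on MatchFirst above; the keyword names are invented for illustration:)

    from pyparsing import Literal, MatchFirst

    keywords = MatchFirst([])
    for kw in ("if", "elif", "else"):
        keywords |= Literal(kw)           # MatchFirst.__ior__ appends an alternative
    print(keywords.parse_string("elif"))  # -> ['elif']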
- - Example:: - - color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) - - shape_spec.run_tests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict( - (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) - ) - opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] - opt2 = [ - e - for e in self.exprs - if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) - ] - self.optionals = opt1 + opt2 - self.multioptionals = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, _MultipleMatch) - ] - self.multirequired = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, OneOrMore) - ] - self.required = [ - e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) - ] - self.required += self.multirequired - self.initExprGroups = False - - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - multis = self.multioptionals[:] - matchOrder = [] - - keepMatching = True - failed = [] - fatals = [] - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + multis - failed.clear() - fatals.clear() - for e in tmpExprs: - try: - tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - failed.append(e) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - # 
look for any ParseFatalExceptions - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if tmpReqd: - missing = ", ".join([str(e) for e in tmpReqd]) - raise ParseException( - instring, - loc, - "Missing one or more required elements ({})".format(missing), - ) - - # add any unmatched Opts, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] - - total_results = ParseResults([]) - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - total_results += results - - return loc, total_results - - def _generateDefaultName(self): - return "{" + " & ".join(str(e) for e in self.exprs) + "}" - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): - super().__init__(savelist) - if isinstance(expr, str_type): - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr) - elif issubclass(type(self), self._literalStringClass): - expr = Literal(expr) - else: - expr = self._literalStringClass(Literal(expr)) - self.expr = expr - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.set_whitespace_chars( - expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars - ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def recurse(self) -> Sequence[ParserElement]: - return [self.expr] if self.expr is not None else [] - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - else: - raise ParseException(instring, loc, "No expression defined", self) - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - super().leave_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - super().ignore_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - return self - - def streamline(self) -> ParserElement: - super().streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def _checkRecursion(self, parseElementList): - if self in parseElementList: - raise RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def _generateDefaultName(self): - 
return "{}:({})".format(self.__class__.__name__, str(self.expr)) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class IndentedBlock(ParseElementEnhance): - """ - Expression to match one or more expressions at a given indentation level. - Useful for parsing text where structure is implied by indentation (like Python source code). - """ - - class _Indent(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) == ref_col) - - class _IndentGreater(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column greater than {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) > ref_col) - - def __init__( - self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True - ): - super().__init__(expr, savelist=True) - # if recursive: - # raise NotImplementedError("IndentedBlock with recursive is not implemented") - self._recursive = recursive - self._grouped = grouped - self.parent_anchor = 1 - - def parseImpl(self, instring, loc, doActions=True): - # advance parse position to non-whitespace by using an Empty() - # this should be the column to be used for all subsequent indented lines - anchor_loc = Empty().preParse(instring, loc) - - # see if self.expr matches at the current location - if not it will raise an exception - # and no further work is necessary - self.expr.try_parse(instring, anchor_loc, doActions) - - indent_col = col(anchor_loc, instring) - peer_detect_expr = self._Indent(indent_col) - - inner_expr = Empty() + peer_detect_expr + self.expr - if self._recursive: - sub_indent = self._IndentGreater(indent_col) - nested_block = IndentedBlock( - self.expr, recursive=self._recursive, grouped=self._grouped - ) - nested_block.set_debug(self.debug) - nested_block.parent_anchor = indent_col - inner_expr += Opt(sub_indent + nested_block) - - inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") - block = OneOrMore(inner_expr) - - trailing_undent = self._Indent(self.parent_anchor) | StringEnd() - - if self._grouped: - wrapper = Group - else: - wrapper = lambda expr: expr - return (wrapper(block) + Optional(trailing_undent)).parseImpl( - instring, anchor_loc, doActions - ) - - -class AtStringStart(ParseElementEnhance): - """Matches if expression matches at the beginning of the parse - string:: - - AtStringStart(Word(nums)).parse_string("123") - # prints ["123"] - - AtStringStart(Word(nums)).parse_string(" 123") - # raises ParseException - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - raise ParseException(instring, loc, "not found at string start") - return super().parseImpl(instring, loc, doActions) - - -class AtLineStart(ParseElementEnhance): - r"""Matches if an expression matches at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (AtLineStart('AAA') + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) != 1: - raise 
ParseException(instring, loc, "not found at line start") - return super().parseImpl(instring, loc, doActions) - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. - - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, :class:`Literal`, - :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` - with a specified exact or maximum length, then the retreat - parameter is not required. Otherwise, retreat must be specified to - give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
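(A minimal sketch of the variable-length case just described, where retreat bounds how far back the lookbehind may scan; the names are invented:)

    from pyparsing import PrecededBy, Word, alphas, nums

    # Word(alphas) has no fixed length, so an explicit retreat window is required
    num_after_word = PrecededBy(Word(alphas), retreat=10) + Word(nums)
    print(num_after_word.search_string("abc123 456"))  # -> [['123']]; '456' follows a space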
- - Example:: - - # VB-style variable names with type prefixes - int_var = PrecededBy("#") + pyparsing_common.identifier - str_var = PrecededBy("$") + pyparsing_common.identifier - - """ - - def __init__( - self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None - ): - super().__init__(expr) - self.expr = self.expr().leave_whitespace() - self.mayReturnEmpty = True - self.mayIndexError = False - self.exact = False - if isinstance(expr, str_type): - retreat = len(expr) - self.exact = True - elif isinstance(expr, (Literal, Keyword)): - retreat = expr.matchLen - self.exact = True - elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: - retreat = expr.maxLen - self.exact = True - elif isinstance(expr, PositionToken): - retreat = 0 - self.exact = True - self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) - self.skipWhitespace = False - self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) - - def parseImpl(self, instring, loc=0, doActions=True): - if self.exact: - if loc < self.retreat: - raise ParseException(instring, loc, self.errmsg) - start = loc - self.retreat - _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[max(0, loc - self.retreat) : loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat + 1) + 1): - try: - # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) - _, ret = test_expr._parse( - instring_slice, len(instring_slice) - offset - ) - except ParseBaseException as pbe: - last_expr = pbe - else: - break - else: - raise last_expr - return loc, ret - - -class Located(ParseElementEnhance): - """ - Decorates a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ``<TAB>`` characters, you - may want to call :class:`ParserElement.parse_with_tabs` - - Example:: - - wd = Word(alphas) - for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [0, ['ljsdf'], 5] - [8, ['lksdjjf'], 15] - [18, ['lkkjj'], 23] - - """ - - def parseImpl(self, instring, loc, doActions=True): - start = loc - loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False) - ret_tokens = ParseResults([start, tokens, loc]) - ret_tokens["locn_start"] = start - ret_tokens["value"] = tokens - ret_tokens["locn_end"] = loc - if self.resultsName: - # must return as a list, so that the name will be attached to the complete group - return loc, [ret_tokens] - else: - return loc, ret_tokens - - -class NotAny(ParseElementEnhance): - """ - Lookahead to disallow matching with the given parse expression. - ``NotAny`` does *not* advance the parsing position within the - input string, it only verifies that the specified parse expression - does *not* match at the current position. Also, ``NotAny`` does - *not* skip over leading whitespace. ``NotAny`` always returns - a null token list. May be constructed using the ``'~'`` operator.
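(One more minimal sketch ahead of the fuller example below; names invented:)

    from pyparsing import NotAny, Word, alphas, nums

    # ~Word(nums) succeeds only where a number does NOT begin
    ident = NotAny(Word(nums)) + Word(alphas + nums)
    print(ident.parse_string("abc123"))  # -> ['abc123']
    # ident.parse_string("123abc") would raise ParseException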
- - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Opt(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infix_notation - boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] - - # integers that are followed by "." are actually floats - integer = Word(nums) + ~Char(".") - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - # do NOT use self.leave_whitespace(), don't want to propagate to exprs - # self.leave_whitespace() - self.skipWhitespace = False - - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + str(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.can_parse_next(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def _generateDefaultName(self): - return "~{" + str(self.expr) + "}" - - -class _MultipleMatch(ParseElementEnhance): - def __init__( - self, - expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr) - stopOn = stopOn or stop_on - self.saveAsList = True - ender = stopOn - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender) -> ParserElement: - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions) - try: - hasIgnoreExprs = not not self.ignoreExprs - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in [self.expr] + self.expr.recurse(): - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """ - Repetition of one or more of the given expression. 
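(As a complement to the parameters and example below, the bracket shorthand equivalent to these repetition classes; a small sketch:)

    from pyparsing import Word, alphas

    wd = Word(alphas)
    zero_plus = wd[...]      # equivalent to ZeroOrMore(wd)
    one_plus = wd[1, ...]    # equivalent to OneOrMore(wd)
    print(one_plus.parse_string("a b c"))  # -> ['a', 'b', 'c']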
- - Parameters: - - expr - expression that must match one or more times - - stop_on - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stop_on attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parse_string(text).pprint() - """ - - def _generateDefaultName(self): - return "{" + str(self.expr) + "}..." - - -class ZeroOrMore(_MultipleMatch): - """ - Optional repetition of zero or more of the given expression. - - Parameters: - - ``expr`` - expression that must match zero or more times - - ``stop_on`` - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - (default= ``None``) - - Example: similar to :class:`OneOrMore` - """ - - def __init__( - self, - expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr, stopOn=stopOn or stop_on) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super().parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, ParseResults([], name=self.resultsName) - - def _generateDefaultName(self): - return "[" + str(self.expr) + "]..." - - -class _NullToken: - def __bool__(self): - return False - - def __str__(self): - return "" - - -class Opt(ParseElementEnhance): - """ - Optional matching of the given expression. - - Parameters: - - ``expr`` - expression that must match zero or more times - - ``default`` (optional) - value to be returned if the optional expression is not found. 
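(The ZIP example below does not exercise ``default``; a minimal sketch of it, with invented names:)

    from pyparsing import Opt, Word, nums

    ports = Word(nums) + Opt(Word(nums), default="0")
    print(ports.parse_string("8080 443"))  # -> ['8080', '443']
    print(ports.parse_string("8080"))      # -> ['8080', '0'], default fills the gap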
- - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) - zip.run_tests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - - __optionalNotMatched = _NullToken() - - def __init__( - self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched - ): - super().__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - self_expr = self.expr - try: - loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - default_value = self.defaultValue - if default_value is not self.__optionalNotMatched: - if self_expr.resultsName: - tokens = ParseResults([default_value]) - tokens[self_expr.resultsName] = default_value - else: - tokens = [default_value] - else: - tokens = [] - return loc, tokens - - def _generateDefaultName(self): - inner = str(self.expr) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "[" + inner + "]" - - -Optional = Opt - - -class SkipTo(ParseElementEnhance): - """ - Token for skipping over all undefined text until the matched - expression is found. - - Parameters: - - ``expr`` - target expression marking the end of the data to be skipped - - ``include`` - if ``True``, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element - list) (default= ``False``). 
- - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be - included in the skipped text; if found before the target expression is found, - the :class:`SkipTo` is not a match - - Example:: - - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quoted_string) - string_data.set_parse_action(token_map(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.search_string(report): - print(tkt.dump()) - - prints:: - - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: '6' - - desc: 'Intermittent system crash' - - issue_num: '101' - - sev: 'Critical' - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: '14' - - desc: "Spelling error on Login ('log|n')" - - issue_num: '94' - - sev: 'Cosmetic' - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: '47' - - desc: 'System slow when running too many reports' - - issue_num: '79' - - sev: 'Minor' - """ - - def __init__( - self, - other: Union[ParserElement, str], - include: bool = False, - ignore: bool = None, - fail_on: typing.Optional[Union[ParserElement, str]] = None, - *, - failOn: Union[ParserElement, str] = None, - ): - super().__init__(other) - failOn = failOn or fail_on - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.saveAsList = False - if isinstance(failOn, str_type): - self.failOn = self._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for " + str(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - startloc = loc - instrlen = len(instring) - self_expr_parse = self.expr._parse - self_failOn_canParseNext = ( - self.failOn.canParseNext if self.failOn is not None else None - ) - self_ignoreExpr_tryParse = ( - self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - ) - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - self_expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return values - loc = tmploc
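(Pausing the scan loop above for a compact usage sketch: with include=True the target expression is returned along with the skipped text; grammar names invented:)

    from pyparsing import Keyword, SkipTo

    body = Keyword("BEGIN") + SkipTo(Keyword("END"), include=True)
    print(body.parse_string("BEGIN one two three END"))
    # -> ['BEGIN', 'one two three ', 'END']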
- skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - - -class Forward(ParseElementEnhance): - """ - Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the ``'<<'`` operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: - - fwd_expr << a | b | c - - will actually be evaluated as:: - - (fwd_expr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwd_expr << (a | b | c) - - Converting to use the ``'<<='`` operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - - def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None): - self.caller_frame = traceback.extract_stack(limit=2)[0] - super().__init__(other, savelist=False) - self.lshift_line = None - - def __lshift__(self, other): - if hasattr(self, "caller_frame"): - del self.caller_frame - if isinstance(other, str_type): - other = self._literalStringClass(other) - self.expr = other - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.set_whitespace_chars( - self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars - ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - self.lshift_line = traceback.extract_stack(limit=2)[-2] - return self - - def __ilshift__(self, other): - return self << other - - def __or__(self, other): - caller_line = traceback.extract_stack(limit=2)[-2] - if ( - __diag__.warn_on_match_first_with_lshift_operator - and caller_line == self.lshift_line - and Diagnostics.warn_on_match_first_with_lshift_operator - not in self.suppress_warnings_ - ): - warnings.warn( - "using '<<' operator with '|' is probably an error, use '<<='", - stacklevel=2, - ) - ret = super().__or__(other) - return ret - - def __del__(self): - # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' - if ( - self.expr is None - and __diag__.warn_on_assignment_to_Forward - and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ - ): - warnings.warn_explicit( - "Forward defined here but no expression attached later using '<<=' or '<<'", - UserWarning, - filename=self.caller_frame.filename, - lineno=self.caller_frame.lineno, - ) - - def parseImpl(self, instring, loc, doActions=True): - if ( - self.expr is None - and __diag__.warn_on_parse_using_empty_Forward - and Diagnostics.warn_on_parse_using_empty_Forward - not in self.suppress_warnings_ - ): - # walk stack until parse_string, scan_string, search_string, or transform_string is found - parse_fns = [ - "parse_string", - "scan_string", - "search_string", - "transform_string", - ] - tb = traceback.extract_stack(limit=200) - for i, frm in enumerate(reversed(tb), start=1): - if frm.name in parse_fns: - stacklevel = i + 1 - break - else: - stacklevel = 2 - warnings.warn( - "Forward expression was never assigned a value, will not parse any 
input", - stacklevel=stacklevel, - ) - if not ParserElement._left_recursion_enabled: - return super().parseImpl(instring, loc, doActions) - # ## Bounded Recursion algorithm ## - # Recursion only needs to be processed at ``Forward`` elements, since they are - # the only ones that can actually refer to themselves. The general idea is - # to handle recursion stepwise: We start at no recursion, then recurse once, - # recurse twice, ..., until more recursion offers no benefit (we hit the bound). - # - # The "trick" here is that each ``Forward`` gets evaluated in two contexts - # - to *match* a specific recursion level, and - # - to *search* the bounded recursion level - # and the two run concurrently. The *search* must *match* each recursion level - # to find the best possible match. This is handled by a memo table, which - # provides the previous match to the next level match attempt. - # - # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. - # - # There is a complication since we not only *parse* but also *transform* via - # actions: We do not want to run the actions too often while expanding. Thus, - # we expand using `doActions=False` and only run `doActions=True` if the next - # recursion level is acceptable. - with ParserElement.recursion_lock: - memo = ParserElement.recursion_memos - try: - # we are parsing at a specific recursion expansion - use it as-is - prev_loc, prev_result = memo[loc, self, doActions] - if isinstance(prev_result, Exception): - raise prev_result - return prev_loc, prev_result.copy() - except KeyError: - act_key = (loc, self, True) - peek_key = (loc, self, False) - # we are searching for the best recursion expansion - keep on improving - # both `doActions` cases must be tracked separately here! - prev_loc, prev_peek = memo[peek_key] = ( - loc - 1, - ParseException( - instring, loc, "Forward recursion without base case", self - ), - ) - if doActions: - memo[act_key] = memo[peek_key] - while True: - try: - new_loc, new_peek = super().parseImpl(instring, loc, False) - except ParseException: - # we failed before getting any match – do not hide the error - if isinstance(prev_peek, Exception): - raise - new_loc, new_peek = prev_loc, prev_peek - # the match did not get better: we are done - if new_loc <= prev_loc: - if doActions: - # replace the match for doActions=False as well, - # in case the action did backtrack - prev_loc, prev_result = memo[peek_key] = memo[act_key] - del memo[peek_key], memo[act_key] - return prev_loc, prev_result.copy() - del memo[peek_key] - return prev_loc, prev_peek.copy() - # the match did get better: see if we can improve further - else: - if doActions: - try: - memo[act_key] = super().parseImpl(instring, loc, True) - except ParseException as e: - memo[peek_key] = memo[act_key] = (new_loc, e) - raise - prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = False - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = True - return self - - def streamline(self) -> ParserElement: - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None) -> None: - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def 
_generateDefaultName(self): - # Avoid infinite recursion by setting a temporary _defaultName - self._defaultName = ": ..." - - # Use the string representation of main expression. - retString = "..." - try: - if self.expr is not None: - retString = str(self.expr)[:1000] - else: - retString = "None" - finally: - return self.__class__.__name__ + ": " + retString - - def copy(self) -> ParserElement: - if self.expr is not None: - return super().copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, list_all_matches=False): - if ( - __diag__.warn_name_set_on_empty_Forward - and Diagnostics.warn_name_set_on_empty_Forward - not in self.suppress_warnings_ - ): - if self.expr is None: - warnings.warn( - "{}: setting results name {!r} on {} expression " - "that has no contained expression".format( - "warn_name_set_on_empty_Forward", name, type(self).__name__ - ), - stacklevel=3, - ) - - return super()._setResultsName(name, list_all_matches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist=False): - super().__init__(expr) # , savelist) - self.saveAsList = False - - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parse_string('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) - """ - - def __init__( - self, - expr: ParserElement, - join_string: str = "", - adjacent: bool = True, - *, - joinString: typing.Optional[str] = None, - ): - super().__init__(expr) - joinString = joinString if joinString is not None else join_string - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leave_whitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other) -> ParserElement: - if self.adjacent: - ParserElement.ignore(self, other) - else: - super().ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults( - ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults - ) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - The optional ``aslist`` argument when set to True will return the - parsed tokens as a Python list instead of a pyparsing ParseResults. 
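(The Group example below shows plain grouping; the aslist behavior described above, as a short sketch:)

    from pyparsing import Group, Word, alphas

    wd = Word(alphas)
    grouped = Group(wd[1, ...], aslist=True)
    result = grouped.parse_string("a b c")
    print(result[0])                    # -> ['a', 'b', 'c']
    print(isinstance(result[0], list))  # -> True: a plain Python list, not ParseResults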
- - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Opt(delimited_list(term)) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Opt(delimited_list(term))) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', ['a', 'b', '100']] - """ - - def __init__(self, expr: ParserElement, aslist: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonList = aslist - - def postParse(self, instring, loc, tokenlist): - if self._asPythonList: - return ParseResults.List( - tokenlist.asList() - if isinstance(tokenlist, ParseResults) - else list(tokenlist) - ) - else: - return [tokenlist] - - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as an item key. - - The optional ``asdict`` argument when set to True will return the - parsed tokens as a Python dict instead of a pyparsing ParseResults. - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - # print attributes as plain groups - print(attr_expr[1, ...].parse_string(text).dump()) - - # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names - result = Dict(Group(attr_expr)[1, ...]).parse_string(text) - print(result.dump()) - - # access named fields as dict entries, or output as dict - print(result['shape']) - print(result.as_dict()) - - prints:: - - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' - SQUARE - {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - - See :class:`ParseResults` for more examples of accessing fields by results name. - """ - - def __init__(self, expr: ParserElement, asdict: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonDict = asdict - - def postParse(self, instring, loc, tokenlist): - for i, tok in enumerate(tokenlist): - if len(tok) == 0: - continue - - ikey = tok[0] - if isinstance(ikey, int): - ikey = str(ikey).strip() - - if len(tok) == 1: - tokenlist[ikey] = _ParseResultsWithOffset("", i) - - elif len(tok) == 2 and not isinstance(tok[1], ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) - - else: - try: - dictvalue = tok.copy() # ParseResults(i) - except Exception: - exc = TypeError( - "could not extract dict values from parsed results" - " - Dict expression must contain Grouped expressions" - ) - raise exc from None - - del dictvalue[0] - - if len(dictvalue) != 1 or ( - isinstance(dictvalue, ParseResults) and dictvalue.haskeys() - ): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) - - if self._asPythonDict: - return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() - else: - return [tokenlist] if self.resultsName else tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression.
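(Related to the warn_multiple_tokens_in_named_alternation diagnostics seen earlier: wrapping an And alternative in Group before naming it keeps its tokens together; a sketch with invented names:)

    from pyparsing import Group, Word, alphas, nums

    pair = Group(Word(alphas) + Word(nums))("item")
    result = pair.parse_string("abc 123")
    print(result["item"])  # -> ['abc', '123'], one named group rather than loose tokens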
- - Example:: - - source = "a, b, c,d" - wd = Word(alphas) - wd_list1 = wd + (',' + wd)[...] - print(wd_list1.parse_string(source)) - - # often, delimiters that are useful during parsing are just in the - # way afterward - use Suppress to keep them out of the parsed output - wd_list2 = wd + (Suppress(',') + wd)[...] - print(wd_list2.parse_string(source)) - - # Skipped text (using '...') can be suppressed as well - source = "lead in START relevant text END trailing text" - start_marker = Keyword("START") - end_marker = Keyword("END") - find_body = Suppress(...) + start_marker + ... + end_marker - print(find_body.parse_string(source)) - - prints:: - - ['a', ',', 'b', ',', 'c', ',', 'd'] - ['a', 'b', 'c', 'd'] - ['START', 'relevant text ', 'END'] - - (See also :class:`delimited_list`.) - """ - - def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): - if expr is ...: - expr = _PendingSkip(NoMatch()) - super().__init__(expr) - - def __add__(self, other) -> "ParserElement": - if isinstance(self.expr, _PendingSkip): - return Suppress(SkipTo(other)) + other - else: - return super().__add__(other) - - def __sub__(self, other) -> "ParserElement": - if isinstance(self.expr, _PendingSkip): - return Suppress(SkipTo(other)) - other - else: - return super().__sub__(other) - - def postParse(self, instring, loc, tokenlist): - return [] - - def suppress(self) -> ParserElement: - return self - - -def trace_parse_action(f: ParseAction) -> ParseAction: - """Decorator for debugging parse actions. - - When the parse action is called, this decorator will print - ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. - When the parse action completes, the decorator will print - ``"<<"`` followed by the returned value, or any exception that the parse action raised. - - Example:: - - wd = Word(alphas) - - @trace_parse_action - def remove_duplicate_chars(tokens): - return ''.join(sorted(set(''.join(tokens)))) - - wds = wd[1, ...].set_parse_action(remove_duplicate_chars) - print(wds.parse_string("slkdjs sld sldd sdlf sdljf")) - - prints:: - - >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) - <<leaving remove_duplicate_chars (ret: 'dfjkls') - ['dfjkls'] - """ - f = _trim_arity(f) - - def z(*paArgs): - thisFunc = f.__name__ - s, l, t = paArgs[-3:] - if len(paArgs) > 3: - thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc - sys.stderr.write( - ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t) - ) - try: - ret = f(*paArgs) - except Exception as exc: - sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc)) - raise - sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret)) - return ret - - return z - - -def srange(s: str) -> str: - r"""Helper to easily define string ranges for use in :class:`Word` - construction. Borrows syntax from regexp ``'[]'`` string range - definitions:: - - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - - The input string must be enclosed in []'s, and the returned string - is the expanded character set joined into a single string. The - values enclosed in the []'s may be: - - - a single character - - an escaped character with a leading backslash (such as ``\-`` - or ``\]``) - - an escaped hex character with a leading ``'\x'`` - (``\x21``, which is a ``'!'`` character) (``\0x##`` - is also supported for backwards compatibility) - - an escaped octal character with a leading ``'\0'`` - (``\041``, which is a ``'!'`` character) - - a range of any of the above, separated by a dash (``'a-z'``, - etc.) - - any combination of the above (``'aeiouy'``, - ``'a-zA-Z0-9_$'``, etc.)
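(A short usage sketch combining srange with Word, assuming only the documented behavior above; names invented:)

    from pyparsing import Word, srange

    first_chars = srange("[a-zA-Z_]")
    body_chars = srange("[a-zA-Z0-9_]")
    identifier = Word(first_chars, body_chars)
    print(identifier.parse_string("_private1"))  # -> ['_private1']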
- """ - _expanded = ( - lambda p: p - if not isinstance(p, ParseResults) - else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) - ) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) - except Exception: - return "" - - -def token_map(func, *args) -> ParseAction: - """Helper to define a parse action by mapping a function to all - elements of a :class:`ParseResults` list. If any additional args are passed, - they are forwarded to the given function as additional arguments - after the token, as in - ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, - which will convert the parsed data to an integer using base 16. - - Example (compare the last to example in :class:`ParserElement.transform_string`:: - - hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) - hex_ints.run_tests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).set_parse_action(token_map(str.upper)) - upperword[1, ...].run_tests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).set_parse_action(token_map(str.title)) - wd[1, ...].set_parse_action(' '.join).run_tests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - - prints:: - - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - - def pa(s, l, t): - return [func(tokn, *args) for tokn in t] - - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - pa.__name__ = func_name - - return pa - - -def autoname_elements() -> None: - """ - Utility to simplify mass-naming of parser elements, for - generating railroad diagram with named subdiagrams. 
- """ - for name, var in sys._getframe().f_back.f_locals.items(): - if isinstance(var, ParserElement) and not var.customName: - var.set_name(name) - - -dbl_quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' -).set_name("string enclosed in double quotes") - -sgl_quoted_string = Combine( - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("string enclosed in single quotes") - -quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' - | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("quotedString using single or double quotes") - -unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") - - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - -# backward compatibility names -tokenMap = token_map -conditionAsParseAction = condition_as_parse_action -nullDebugAction = null_debug_action -sglQuotedString = sgl_quoted_string -dblQuotedString = dbl_quoted_string -quotedString = quoted_string -unicodeString = unicode_string -lineStart = line_start -lineEnd = line_end -stringStart = string_start -stringEnd = string_end -traceParseAction = trace_parse_action diff --git a/spaces/Bilalst/Gradio_Youtube_Transcript_v2/app.py b/spaces/Bilalst/Gradio_Youtube_Transcript_v2/app.py deleted file mode 100644 index c110f9c8931e0ab4885a14d0041b79743eee4eae..0000000000000000000000000000000000000000 --- a/spaces/Bilalst/Gradio_Youtube_Transcript_v2/app.py +++ /dev/null @@ -1,116 +0,0 @@ -import gradio as gr -import requests -from sentence_transformers import SentenceTransformer -from youtube_transcript_api import YouTubeTranscriptApi -import numpy as np -import huggingface_hub -import os -import faiss - -# Set up SentenceTransformer -model = SentenceTransformer('paraphrase-MiniLM-L6-v2') - - -playlist_id = 'PLD4EAA8F8C9148A1B' -api_key = 'AIzaSyBGuTvXcnliEh6yhTxugrAVM5YzcG9qr9U' - -# Make a request to the YouTube Data API to retrieve the playlist items -url = f'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId={playlist_id}&key={api_key}' -video_ids = [] - -while True: - response = requests.get(url) - data = response.json() - - # Extract the video IDs from the response - for item in data['items']: - video_ids.append(item['snippet']['resourceId']['videoId']) - - # Check if there are more pages of results - if 'nextPageToken' in data: - next_page_token = data['nextPageToken'] - url = f'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId={playlist_id}&key={api_key}&pageToken={next_page_token}' - else: - break - -# Empty lists to store transcripts and video IDs -transcripts = [] -ids = [] - -for video_id in video_ids: - try: - transcript = YouTubeTranscriptApi.get_transcript(video_id) - transcript_text = ' '.join([t['text'] for t in transcript]) - transcripts.append(transcript_text) - ids.append(video_id) - - except Exception as e: - print(f"Error retrieving transcript for video {video_id}: {e}") - continue - -# create sentence embeddings -sentence_embeddings = model.encode(transcripts) - -# Set up FAISS -index = faiss.IndexFlatL2(384) -# Convert list of embeddings to NumPy array -sentence_embeddings = 
np.array(sentence_embeddings) - -# Add sentence embeddings to FAISS index -index.add(sentence_embeddings) - - -#--------------------------------------------- - -def get_video_links(input_text): - # Encode input text using SentenceTransformer - input_embedding = model.encode([input_text])[0] - - # Perform nearest neighbor search in FAISS index - k = 15 # Number of nearest neighbors to retrieve - _, T = index.search(np.array([input_embedding]), k) # search - - # Return the list of video links with thumbnails and titles as an HTML string - video_links = [] - visited_ids = set() - for i in T[0]: - video_id = ids[i] - if video_id in visited_ids: - continue # Skip if the video_id has already been visited - visited_ids.add(video_id) - - # Retrieve video details using YouTube Data API - video_info_url = f"https://www.googleapis.com/youtube/v3/videos?part=snippet&id={video_id}&key={api_key}" - response = requests.get(video_info_url) - data = response.json() - video_title = data['items'][0]['snippet']['title'] - video_thumbnail = data['items'][0]['snippet']['thumbnails']['default']['url'] - - # Generate HTML code for the video link with thumbnail and title - video_link = f"https://www.youtube.com/watch?v={video_id}" - video_html = f'
-            <a href="{video_link}" target="_blank"><img src="{video_thumbnail}"></a>
-            <p>{video_title}</p>
' - video_links.append(video_html) - - return ''.join(video_links) - -# Create Gradio interface with "html" output type -iface = gr.Interface(fn=get_video_links, inputs=[gr.inputs.Textbox(label="Add what you are looking to find in Dr. Joe's testimonials!")], outputs="html", title="Dr. Joe Dispenza testimonials Search") - - - -# Launch the Gradio interface on Hugging Face Spaces -if __name__ == '__main__': - iface.launch() - - - - - - - - - - - - - diff --git a/spaces/Bonosa2/dall-e_image-generation/app.py b/spaces/Bonosa2/dall-e_image-generation/app.py deleted file mode 100644 index c1e4901d9252dc9ef3d122103dcab4f06b7617d8..0000000000000000000000000000000000000000 --- a/spaces/Bonosa2/dall-e_image-generation/app.py +++ /dev/null @@ -1,43 +0,0 @@ -import gradio as gr -import openai -import urllib.request -from PIL import Image -import os -import nltk -#nltk.download('punkt') - -def generate_image(api_key, prompt, resolution): - if not api_key: - print("Error: API Key is required.") - return - openai.api_key = api_key - response = openai.Image.create( - prompt=prompt, - n=1, - size=resolution - ) - - image_url = response['data'][0]['url'] - - # Open the URL image, resize it to the chosen resolution and return it - with urllib.request.urlopen(image_url) as url: - with open('temp.jpg', 'wb') as f: - f.write(url.read()) - img = Image.open('temp.jpg') - - return img - -iface = gr.Interface( - fn=generate_image, - inputs=[ - gr.inputs.Textbox(lines=1, label="API Key", type="password"), - gr.inputs.Textbox(lines=1, label="Prompt"), - gr.inputs.Radio(choices=["256x256", "512x512", "1024x1024"], label="Resolution") - ], - outputs=gr.outputs.Image(type="pil"), - title="DALL-E Image Generator", - description="Enter your API key, a prompt, and choose a resolution to generate an image from DALL-E." -) - - -iface.launch() diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/transform_output_iterator.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/transform_output_iterator.h deleted file mode 100644 index 4c6683ae5c9b441d0c31d50d36fcabed60996b8e..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/iterator/transform_output_iterator.h +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright 2008-2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! \file thrust/iterator/transform_output_iterator.h - * \brief An output iterator which adapts another output iterator by applying a - * function to the result of its dereference before writing it. - */ - -#pragma once - -#include -#include - -namespace thrust -{ - -/*! \addtogroup iterators - * \{ - */ - -/*! \addtogroup fancyiterator Fancy Iterators - * \ingroup iterators - * \{ - */ - -/*! \p transform_output_iterator is a special kind of output iterator which - * transforms a value written upon dereference. 
This iterator is useful
- * for transforming an output from algorithms without explicitly storing the
- * intermediate result in the memory and applying subsequent transformation,
- * thereby avoiding wasting memory capacity and bandwidth.
- * Using \p transform_output_iterator facilitates kernel fusion by deferring execution
- * of transformation until the value is written while saving both memory
- * capacity and bandwidth.
- *
- * The following code snippet demonstrates how to create a
- * \p transform_output_iterator which applies \c sqrtf to the assigned value.
- *
- * \code
- * #include <thrust/iterator/transform_output_iterator.h>
- * #include <thrust/device_vector.h>
- *
- * // note: functor inherits from unary_function
- * struct square_root : public thrust::unary_function<float, float>
- * {
- *   __host__ __device__
- *   float operator()(float x) const
- *   {
- *     return sqrtf(x);
- *   }
- * };
- *
- * int main()
- * {
- *   thrust::device_vector<float> v(4);
- *
- *   typedef thrust::device_vector<float>::iterator FloatIterator;
- *   thrust::transform_output_iterator<square_root, FloatIterator> iter(v.begin(), square_root());
- *
- *   iter[0] =  1.0f;    // stores sqrtf( 1.0f)
- *   iter[1] =  4.0f;    // stores sqrtf( 4.0f)
- *   iter[2] =  9.0f;    // stores sqrtf( 9.0f)
- *   iter[3] = 16.0f;    // stores sqrtf(16.0f)
- *   // iter[4] is an out-of-bounds error
- *
- *   v[0]; // returns 1.0f;
- *   v[1]; // returns 2.0f;
- *   v[2]; // returns 3.0f;
- *   v[3]; // returns 4.0f;
- *
- * }
- * \endcode
- *
- * \see make_transform_output_iterator
- */
-
-template <typename UnaryFunction, typename OutputIterator>
-  class transform_output_iterator
-    : public detail::transform_output_iterator_base<UnaryFunction, OutputIterator>::type
-{
-
-  /*! \cond
-   */
-
-  public:
-
-    typedef typename
-    detail::transform_output_iterator_base<UnaryFunction, OutputIterator>::type
-    super_t;
-
-    friend class thrust::iterator_core_access;
-  /*! \endcond
-   */
-
-  /*! This constructor takes as argument an \c OutputIterator and a \c UnaryFunction
-   *  and copies them to a new \p transform_output_iterator
-   *
-   * \param out An \c OutputIterator pointing to the output range whereto the result of
-   *            \p transform_output_iterator's \c UnaryFunction will be written.
-   * \param fun A \c UnaryFunction used to transform the objects assigned to
-   *            this \p transform_output_iterator.
-   */
-  __host__ __device__
-  transform_output_iterator(OutputIterator const& out, UnaryFunction fun) : super_t(out), fun(fun)
-  {
-  }
-
-  /*! \cond
-   */
-  private:
-
-    __host__ __device__
-    typename super_t::reference dereference() const
-    {
-      return detail::transform_output_iterator_proxy<
-        UnaryFunction, OutputIterator
-      >(this->base_reference(), fun);
-    }
-
-    UnaryFunction fun;
-
-  /*! \endcond
-   */
-}; // end transform_output_iterator
-
-/*! \p make_transform_output_iterator creates a \p transform_output_iterator from
- *  an \c OutputIterator and \c UnaryFunction.
- *
- * \param out The \c OutputIterator pointing to the output range of the newly
- *            created \p transform_output_iterator
- * \param fun The \c UnaryFunction used to transform the object before assigning it to
- *            \c out by the newly created \p transform_output_iterator
- * \see transform_output_iterator
- */
-template <typename UnaryFunction, typename OutputIterator>
-transform_output_iterator<UnaryFunction, OutputIterator>
-__host__ __device__
-make_transform_output_iterator(OutputIterator out, UnaryFunction fun)
-{
-  return transform_output_iterator<UnaryFunction, OutputIterator>(out, fun);
-} // end make_transform_output_iterator
-
-/*! \} // end fancyiterators
- */
-
-/*! 
\} // end iterators
- */
-
-} // end thrust
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/zip_iterator.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/zip_iterator.h
deleted file mode 100644
index 7b86d06d513253c5c89dd1d88ef508bbc2a3684f..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/iterator/zip_iterator.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*! \file thrust/iterator/zip_iterator.h
- *  \brief An iterator which returns a tuple of the result of dereferencing
- *         a tuple of iterators when dereferenced
- */
-
-/*
- * Copyright David Abrahams and Thomas Becker 2000-2006.
- *
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying NOTICE file for the complete license)
- *
- * For more information, see http://www.boost.org
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/iterator/iterator_facade.h>
-#include <thrust/iterator/detail/zip_iterator_base.h>
-#include <thrust/detail/type_traits.h>
-
-namespace thrust
-{
-
-/*! \addtogroup iterators
- *  \{
- */
-
-/*! \addtogroup fancyiterator Fancy Iterators
- *  \ingroup iterators
- *  \{
- */
-
-/*! \p zip_iterator is an iterator which represents a pointer into a range
- *  of \p tuples whose elements are themselves taken from a \p tuple of input
- *  iterators. This iterator is useful for creating a virtual array of structures
- *  while achieving the same performance and bandwidth as the structure of arrays
- *  idiom. \p zip_iterator also facilitates kernel fusion by providing a convenient
- *  means of amortizing the execution of the same operation over multiple ranges.
- *
- *  The following code snippet demonstrates how to create a \p zip_iterator
- *  which represents the result of "zipping" multiple ranges together.
- *
- *  \code
- *  #include <thrust/iterator/zip_iterator.h>
- *  #include <thrust/tuple.h>
- *  #include <thrust/device_vector.h>
- *  ... 
- *  thrust::device_vector<int> int_v(3);
- *  int_v[0] = 0; int_v[1] = 1; int_v[2] = 2;
- *
- *  thrust::device_vector<float> float_v(3);
- *  float_v[0] = 0.0f; float_v[1] = 1.0f; float_v[2] = 2.0f;
- *
- *  thrust::device_vector<char> char_v(3);
- *  char_v[0] = 'a'; char_v[1] = 'b'; char_v[2] = 'c';
- *
- *  // typedef these iterators for shorthand
- *  typedef thrust::device_vector<int>::iterator   IntIterator;
- *  typedef thrust::device_vector<float>::iterator FloatIterator;
- *  typedef thrust::device_vector<char>::iterator  CharIterator;
- *
- *  // typedef a tuple of these iterators
- *  typedef thrust::tuple<IntIterator, FloatIterator, CharIterator> IteratorTuple;
- *
- *  // typedef the zip_iterator of this tuple
- *  typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
- *
- *  // finally, create the zip_iterator
- *  ZipIterator iter(thrust::make_tuple(int_v.begin(), float_v.begin(), char_v.begin()));
- *
- *  *iter;   // returns (0, 0.0f, 'a')
- *  iter[0]; // returns (0, 0.0f, 'a')
- *  iter[1]; // returns (1, 1.0f, 'b')
- *  iter[2]; // returns (2, 2.0f, 'c')
- *
- *  thrust::get<0>(iter[2]); // returns 2
- *  thrust::get<1>(iter[0]); // returns 0.0f
- *  thrust::get<2>(iter[1]); // returns 'b'
- *
- *  // iter[3] is an out-of-bounds error
- *  \endcode
- *
- *  Defining the type of a \p zip_iterator can be complex. The next code example demonstrates
- *  how to use the \p make_zip_iterator function with the \p make_tuple function to avoid
- *  explicitly specifying the type of the \p zip_iterator. This example shows how to use
- *  \p zip_iterator to copy multiple ranges with a single call to \p thrust::copy.
- *
- *  \code
- *  #include <thrust/iterator/zip_iterator.h>
- *  #include <thrust/tuple.h>
- *  #include <thrust/device_vector.h>
- *
- *  int main()
- *  {
- *    thrust::device_vector<int> int_in(3), int_out(3);
- *    int_in[0] = 0;
- *    int_in[1] = 1;
- *    int_in[2] = 2;
- *
- *    thrust::device_vector<float> float_in(3), float_out(3);
- *    float_in[0] =  0.0f;
- *    float_in[1] = 10.0f;
- *    float_in[2] = 20.0f;
- *
- *    thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(int_in.begin(), float_in.begin())),
- *                 thrust::make_zip_iterator(thrust::make_tuple(int_in.end(),   float_in.end())),
- *                 thrust::make_zip_iterator(thrust::make_tuple(int_out.begin(),float_out.begin())));
- *
- *    // int_out is now [0, 1, 2]
- *    // float_out is now [0.0f, 10.0f, 20.0f]
- *
- *    return 0;
- *  }
- *  \endcode
- *
- *  \see make_zip_iterator
- *  \see make_tuple
- *  \see tuple
- *  \see get
- */
-template <typename IteratorTuple>
-  class zip_iterator
-    : public detail::zip_iterator_base<IteratorTuple>::type
-{
-  public:
-    /*! Null constructor does nothing.
-     */
-    inline __host__ __device__
-    zip_iterator();
-
-    /*! This constructor creates a new \p zip_iterator from a
-     *  \p tuple of iterators.
-     *
-     *  \param iterator_tuple The \p tuple of iterators to copy from.
-     */
-    inline __host__ __device__
-    zip_iterator(IteratorTuple iterator_tuple);
-
-    /*! This copy constructor creates a new \p zip_iterator from another
-     *  \p zip_iterator.
-     *
-     *  \param other The \p zip_iterator to copy.
-     */
-    template <typename OtherIteratorTuple>
-    inline __host__ __device__
-    zip_iterator(const zip_iterator<OtherIteratorTuple> &other,
-                 typename thrust::detail::enable_if_convertible<
-                   OtherIteratorTuple,
-                   IteratorTuple
-                 >::type * = 0);
-
-    /*! This method returns a \c const reference to this \p zip_iterator's
-     *  \p tuple of iterators.
-     *
-     *  \return A \c const reference to this \p zip_iterator's \p tuple
-     *          of iterators.
-     */
-    inline __host__ __device__
-    const IteratorTuple &get_iterator_tuple() const;
-
-    /*! 
\cond
-     */
-  private:
-    typedef typename
-    detail::zip_iterator_base<IteratorTuple>::type super_t;
-
-    friend class thrust::iterator_core_access;
-
-    // Dereferencing returns a tuple built from the dereferenced
-    // iterators in the iterator tuple.
-    __host__ __device__
-    typename super_t::reference dereference() const;
-
-    // Two zip_iterators are equal if the two first iterators of the
-    // tuple are equal. Note this differs from Boost's implementation, which
-    // considers the entire tuple.
-    template <typename OtherIteratorTuple>
-    inline __host__ __device__
-    bool equal(const zip_iterator<OtherIteratorTuple> &other) const;
-
-    // Advancing a zip_iterator means to advance all iterators in the tuple
-    inline __host__ __device__
-    void advance(typename super_t::difference_type n);
-
-    // Incrementing a zip iterator means to increment all iterators in the tuple
-    inline __host__ __device__
-    void increment();
-
-    // Decrementing a zip iterator means to decrement all iterators in the tuple
-    inline __host__ __device__
-    void decrement();
-
-    // Distance is calculated using the first iterator in the tuple.
-    template <typename OtherIteratorTuple>
-    inline __host__ __device__
-    typename super_t::difference_type
-    distance_to(const zip_iterator<OtherIteratorTuple> &other) const;
-
-    // The iterator tuple.
-    IteratorTuple m_iterator_tuple;
-
-    /*! \endcond
-     */
-}; // end zip_iterator
-
-/*! \p make_zip_iterator creates a \p zip_iterator from a \p tuple
- *  of iterators.
- *
- *  \param t The \p tuple of iterators to copy.
- *  \return A newly created \p zip_iterator which zips the iterators encapsulated in \p t.
- *
- *  \see zip_iterator
- */
-template <typename IteratorTuple>
-inline __host__ __device__
-zip_iterator<IteratorTuple> make_zip_iterator(IteratorTuple t);
-
-/*! \} // end fancyiterators
- */
-
-/*! \} // end iterators
- */
-
-} // end thrust
-
-#include <thrust/iterator/detail/zip_iterator.inl>
-
diff --git a/spaces/CVPR/WALT/mmdet/models/dense_heads/free_anchor_retina_head.py b/spaces/CVPR/WALT/mmdet/models/dense_heads/free_anchor_retina_head.py
deleted file mode 100644
index 79879fdc3171b8e34b606b27eb1ceb67f4473e3e..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/models/dense_heads/free_anchor_retina_head.py
+++ /dev/null
@@ -1,270 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-from mmdet.core import bbox_overlaps
-from ..builder import HEADS
-from .retina_head import RetinaHead
-
-EPS = 1e-12
-
-
-@HEADS.register_module()
-class FreeAnchorRetinaHead(RetinaHead):
-    """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466.
-
-    Args:
-        num_classes (int): Number of categories excluding the background
-            category.
-        in_channels (int): Number of channels in the input feature map.
-        stacked_convs (int): Number of conv layers in cls and reg tower.
-            Default: 4.
-        conv_cfg (dict): dictionary to construct and config conv layer.
-            Default: None.
-        norm_cfg (dict): dictionary to construct and config norm layer.
-            Default: norm_cfg=dict(type='GN', num_groups=32,
-            requires_grad=True).
-        pre_anchor_topk (int): Number of boxes taken in each bag.
-        bbox_thr (float): The threshold of the saturated linear function.
-            It is usually the same as the IoU threshold used in NMS.
-        gamma (float): Gamma parameter in focal loss.
-        alpha (float): Alpha parameter in focal loss. 
- """ # noqa: W605 - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - conv_cfg=None, - norm_cfg=None, - pre_anchor_topk=50, - bbox_thr=0.6, - gamma=2.0, - alpha=0.5, - **kwargs): - super(FreeAnchorRetinaHead, - self).__init__(num_classes, in_channels, stacked_convs, conv_cfg, - norm_cfg, **kwargs) - - self.pre_anchor_topk = pre_anchor_topk - self.bbox_thr = bbox_thr - self.gamma = gamma - self.alpha = alpha - - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == len(self.anchor_generator.base_anchors) - - anchor_list, _ = self.get_anchors(featmap_sizes, img_metas) - anchors = [torch.cat(anchor) for anchor in anchor_list] - - # concatenate each level - cls_scores = [ - cls.permute(0, 2, 3, - 1).reshape(cls.size(0), -1, self.cls_out_channels) - for cls in cls_scores - ] - bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4) - for bbox_pred in bbox_preds - ] - cls_scores = torch.cat(cls_scores, dim=1) - bbox_preds = torch.cat(bbox_preds, dim=1) - - cls_prob = torch.sigmoid(cls_scores) - box_prob = [] - num_pos = 0 - positive_losses = [] - for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_, - bbox_preds_) in enumerate( - zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)): - - with torch.no_grad(): - if len(gt_bboxes_) == 0: - image_box_prob = torch.zeros( - anchors_.size(0), - self.cls_out_channels).type_as(bbox_preds_) - else: - # box_localization: a_{j}^{loc}, shape: [j, 4] - pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_) - - # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] - object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes) - - # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] - t1 = self.bbox_thr - t2 = object_box_iou.max( - dim=1, keepdim=True).values.clamp(min=t1 + 1e-12) - object_box_prob = ((object_box_iou - t1) / - (t2 - t1)).clamp( - min=0, max=1) - - # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] - num_obj = gt_labels_.size(0) - indices = torch.stack([ - torch.arange(num_obj).type_as(gt_labels_), gt_labels_ - ], - dim=0) - object_cls_box_prob = torch.sparse_coo_tensor( - indices, object_box_prob) - - # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] - """ - from "start" to "end" implement: - image_box_iou = torch.sparse.max(object_cls_box_prob, - dim=0).t() - - """ - # start - box_cls_prob = torch.sparse.sum( - object_cls_box_prob, dim=0).to_dense() - - indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() - if indices.numel() == 0: - image_box_prob = torch.zeros( - anchors_.size(0), - self.cls_out_channels).type_as(object_box_prob) - else: - nonzero_box_prob = torch.where( - (gt_labels_.unsqueeze(dim=-1) == indices[0]), - 
object_box_prob[:, indices[1]], - torch.tensor([ - 0 - ]).type_as(object_box_prob)).max(dim=0).values - - # upmap to shape [j, c] - image_box_prob = torch.sparse_coo_tensor( - indices.flip([0]), - nonzero_box_prob, - size=(anchors_.size(0), - self.cls_out_channels)).to_dense() - # end - - box_prob.append(image_box_prob) - - # construct bags for objects - match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_) - _, matched = torch.topk( - match_quality_matrix, - self.pre_anchor_topk, - dim=1, - sorted=False) - del match_quality_matrix - - # matched_cls_prob: P_{ij}^{cls} - matched_cls_prob = torch.gather( - cls_prob_[matched], 2, - gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, - 1)).squeeze(2) - - # matched_box_prob: P_{ij}^{loc} - matched_anchors = anchors_[matched] - matched_object_targets = self.bbox_coder.encode( - matched_anchors, - gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors)) - loss_bbox = self.loss_bbox( - bbox_preds_[matched], - matched_object_targets, - reduction_override='none').sum(-1) - matched_box_prob = torch.exp(-loss_bbox) - - # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} - num_pos += len(gt_bboxes_) - positive_losses.append( - self.positive_bag_loss(matched_cls_prob, matched_box_prob)) - positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) - - # box_prob: P{a_{j} \in A_{+}} - box_prob = torch.stack(box_prob, dim=0) - - # negative_loss: - # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| - negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max( - 1, num_pos * self.pre_anchor_topk) - - # avoid the absence of gradients in regression subnet - # when no ground-truth in a batch - if num_pos == 0: - positive_loss = bbox_preds.sum() * 0 - - losses = { - 'positive_bag_loss': positive_loss, - 'negative_bag_loss': negative_loss - } - return losses - - def positive_bag_loss(self, matched_cls_prob, matched_box_prob): - """Compute positive bag loss. - - :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`. - - :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples. - - :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples. - - Args: - matched_cls_prob (Tensor): Classification probabilty of matched - samples in shape (num_gt, pre_anchor_topk). - matched_box_prob (Tensor): BBox probability of matched samples, - in shape (num_gt, pre_anchor_topk). - - Returns: - Tensor: Positive bag loss in shape (num_gt,). - """ # noqa: E501, W605 - # bag_prob = Mean-max(matched_prob) - matched_prob = matched_cls_prob * matched_box_prob - weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) - weight /= weight.sum(dim=1).unsqueeze(dim=-1) - bag_prob = (weight * matched_prob).sum(dim=1) - # positive_bag_loss = -self.alpha * log(bag_prob) - return self.alpha * F.binary_cross_entropy( - bag_prob, torch.ones_like(bag_prob), reduction='none') - - def negative_bag_loss(self, cls_prob, box_prob): - """Compute negative bag loss. - - :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`. - - :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples. - - :math:`P_{j}^{bg}`: Classification probability of negative samples. - - Args: - cls_prob (Tensor): Classification probability, in shape - (num_img, num_anchors, num_classes). - box_prob (Tensor): Box probability, in shape - (num_img, num_anchors, num_classes). - - Returns: - Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes). 
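-
-        Example:
-            With the default ``gamma=2.0`` and ``alpha=0.5``, an anchor with
-            ``cls_prob = 0.8`` and ``box_prob = 0.9`` yields
-            ``prob = 0.8 * (1 - 0.9) = 0.08``, so its contribution is
-            ``(1 - alpha) * 0.08**gamma * BCE(0.08, 0)``.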
- """ # noqa: E501, W605 - prob = cls_prob * (1 - box_prob) - # There are some cases when neg_prob = 0. - # This will cause the neg_prob.log() to be inf without clamp. - prob = prob.clamp(min=EPS, max=1 - EPS) - negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( - prob, torch.zeros_like(prob), reduction='none') - return (1 - self.alpha) * negative_bag_loss diff --git a/spaces/CVPR/WALT/walt/datasets/walt_3d.py b/spaces/CVPR/WALT/walt/datasets/walt_3d.py deleted file mode 100644 index 140afe8b9e3b69a5254e6ea4721aed546f83ddc7..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/walt/datasets/walt_3d.py +++ /dev/null @@ -1,535 +0,0 @@ -import itertools -import logging -import os.path as osp -import tempfile -from collections import OrderedDict - -import mmcv -import numpy as np -import pycocotools -from mmcv.utils import print_log -from pycocotools.coco import COCO -from pycocotools.cocoeval import COCOeval -from terminaltables import AsciiTable - -from mmdet.core import eval_recalls -from .builder import DATASETS -from .custom import CustomDatasetLocal - - -def bounding_box(points): - """returns a list containing the bottom left and the top right - points in the sequence - Here, we traverse the collection of points only once, - to find the min and max for x and y - """ - bot_left_x, bot_left_y = float('inf'), float('inf') - top_right_x, top_right_y = float('-inf'), float('-inf') - for point in points: - x = point[0] - y = point[1] - if x < 0 or y < 0: - continue - bot_left_x = min(bot_left_x, x) - bot_left_y = min(bot_left_y, y) - top_right_x = max(top_right_x, x) - top_right_y = max(top_right_y, y) - - return [bot_left_x, bot_left_y, top_right_x, top_right_y] - -lines = [[0,1],[1,3],[0,2],[3,2],[0,4],[1,5],[2,6],[3,7],[4,5],[5,7],[4,6],[7,6]] - -def get_boundingbox2d3d(cameraname, gt_data, extrinsics_path): - f = open(extrinsics_path,"r") - while True: - a = f.readline() - print(cameraname, a.split('\n')[0].split(' ')[0]) - if cameraname in a.split('\n')[0].split(' ')[0]: - a = a.split('\n')[0].split(' ') - break - - K = np.reshape(np.array(a[1:10]),[3,3]).astype(float) - R = np.reshape(a[10:19], [3,3]) - T = np.array([[a[19]],[a[20]],[a[21]]]) - RT = np.hstack((R,T)).astype(float) - KRT = np.matmul(K, RT) - bb_3d_connected = [] - bb_3d_all = [] - bb_2d_all = [] - bb_3d_proj_all = [] - - for indice, keypoints_3d in enumerate(gt_data['arr_0'][1]): - parking_space = gt_data['arr_0'][0][indice][0] - - if gt_data['arr_0'][0][indice][1] == 0: - continue - points2d_all = [] - parking_space = np.vstack([parking_space, parking_space+[0,0,2]]) - parking_space_tranformed = [] - for point in parking_space: - point = [point[0], point[1], point[2], 1] - point = np.matmul(RT, point) - parking_space_tranformed.append(list(point)) - point2d = np.matmul(K, point) - if point2d[2] < 0: - points2d_all.append([-100,-100,1]) - continue - point2d = point2d/point2d[2] - if point2d[0] < 0 or point2d[0] >2048: - points2d_all.append([-100,-100,1]) - continue - if point2d[1] < 0 or point2d[1] >2048: - points2d_all.append([-100,-100,1]) - continue - - points2d_all.append(point2d) - - bb_3d_proj_all.append(points2d_all) - bbox = bounding_box(points2d_all) - if float('inf') in bbox: - continue - bb_2d_all.append(bbox) - bb_3d_all.append(parking_space) - #for line in lines: - # bb_3d_connected.append(parking_space[line[0]]) - # bb_3d_connected.append(parking_space[line[1]]) - #asas - return bb_3d_all, bb_2d_all, bb_3d_proj_all - - -@DATASETS.register_module() -class 
Walt3DDataset(CustomDatasetLocal): - - CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', - 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', - 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', - 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', - 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', - 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', - 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', - 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', - 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', - 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', - 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') - - def load_annotations(self, ann_file): - import glob - count = 0 - data_infos = [] - self.data_annotations = [] - for i in glob.glob(ann_file + '*'): - gt_data = np.load(i , allow_pickle = True) - for img_folder in glob.glob(ann_file.replace('GT_data','images') + '/*'): - cam_name = img_folder.split('/')[-1] - img_name = i.split('/')[-1].replace('.npz','.png') - info = dict(license=3, height=2048, width=2048, file_name = cam_name+'/' + img_name, date_captured = i.split('/')[-1].split('.')[0], id = count, filename = cam_name+'/' + img_name) - - #info = dict(license=3, height=2048, width=2048, file_name = i.split('/')[-1].replace('.npz','.png'), date_captured = i.split('/')[-1].split('.')[0], id = count, filename = i.split('/')[-1].replace('.npz','.png')) - count = count+1 - data_infos.append(info) - bb_3d_all, bb_2d_all, bb_3d_proj_all = get_boundingbox2d3d(cam_name, gt_data, ann_file.replace('GT_data','Extrinsics') + '/frame_par.txt') - self.data_annotations.append([bb_3d_all, bb_2d_all, bb_3d_proj_all]) - break - return data_infos - - - def get_ann_info(self, idx): - data = self.data_annotations[idx] - gt_bboxes = np.array(data[1]) - gt_bboxes_3d = np.array(data[0]) - gt_bboxes_3d_proj = np.array(data[2]) - - - ann = dict( - bboxes=gt_bboxes, - bboxes_3d = gt_bboxes_3d, - bboxes_3d_proj = gt_bboxes_3d_proj, - labels = (np.zeros(len(gt_bboxes))+2).astype(int), - bboxes_ignore=np.zeros((0, 4), dtype=np.float32), - #masks=np.array([]), - seg_map=np.array([])) - return ann - - def get_cat_ids(self, idx): - data = self.data_annotations[idx] - gt_bboxes = np.array(data[1]) - return (np.zeros(len(gt_bboxes))+2).astype(int) - - - def _filter_imgs(self, min_size=32): - """Filter images too small or without ground truths.""" - valid_inds = [] - for data_info in self.data_infos: - valid_inds.append(data_info['id']) - print(valid_inds) - - return valid_inds - - - def xyxy2xywh(self, bbox): - """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO - evaluation. - - Args: - bbox (numpy.ndarray): The bounding boxes, shape (4, ), in - ``xyxy`` order. - - Returns: - list[float]: The converted bounding boxes, in ``xywh`` order. 
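-
-        Example:
-            >>> self.xyxy2xywh(np.array([10., 20., 30., 60.]))
-            [10.0, 20.0, 20.0, 40.0]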
- """ - - _bbox = bbox.tolist() - return [ - _bbox[0], - _bbox[1], - _bbox[2] - _bbox[0], - _bbox[3] - _bbox[1], - ] - - def _proposal2json(self, results): - """Convert proposal results to COCO json style.""" - json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - bboxes = results[idx] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = 1 - json_results.append(data) - return json_results - - def _det2json(self, results): - """Convert detection results to COCO json style.""" - json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - result = results[idx] - for label in range(len(result)): - bboxes = result[label] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = self.cat_ids[label] - json_results.append(data) - return json_results - - def _segm2json(self, results): - """Convert instance segmentation results to COCO json style.""" - bbox_json_results = [] - segm_json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - det, seg = results[idx] - for label in range(len(det)): - # bbox results - bboxes = det[label] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = self.cat_ids[label] - bbox_json_results.append(data) - - # segm results - # some detectors use different scores for bbox and mask - if isinstance(seg, tuple): - segms = seg[0][label] - mask_score = seg[1][label] - else: - segms = seg[label] - mask_score = [bbox[4] for bbox in bboxes] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(mask_score[i]) - data['category_id'] = self.cat_ids[label] - if isinstance(segms[i]['counts'], bytes): - segms[i]['counts'] = segms[i]['counts'].decode() - data['segmentation'] = segms[i] - segm_json_results.append(data) - return bbox_json_results, segm_json_results - - def results2json(self, results, outfile_prefix): - """Dump the detection results to a COCO style json file. - - There are 3 types of results: proposals, bbox predictions, mask - predictions, and they have different data types. This method will - automatically recognize the type, and dump them to json files. - - Args: - results (list[list | tuple | ndarray]): Testing results of the - dataset. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.bbox.json", "somepath/xxx.segm.json", - "somepath/xxx.proposal.json". - - Returns: - dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ - values are corresponding filenames. 
- """ - result_files = dict() - if isinstance(results[0], list): - json_results = self._det2json(results) - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - mmcv.dump(json_results, result_files['bbox']) - elif isinstance(results[0], tuple): - json_results = self._segm2json(results) - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - result_files['segm'] = f'{outfile_prefix}.segm.json' - mmcv.dump(json_results[0], result_files['bbox']) - mmcv.dump(json_results[1], result_files['segm']) - elif isinstance(results[0], np.ndarray): - json_results = self._proposal2json(results) - result_files['proposal'] = f'{outfile_prefix}.proposal.json' - mmcv.dump(json_results, result_files['proposal']) - else: - raise TypeError('invalid type of results') - return result_files - - def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None): - gt_bboxes = [] - for i in range(len(self.img_ids)): - ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i]) - ann_info = self.coco.load_anns(ann_ids) - if len(ann_info) == 0: - gt_bboxes.append(np.zeros((0, 4))) - continue - bboxes = [] - for ann in ann_info: - if ann.get('ignore', False) or ann['iscrowd']: - continue - x1, y1, w, h = ann['bbox'] - bboxes.append([x1, y1, x1 + w, y1 + h]) - bboxes = np.array(bboxes, dtype=np.float32) - if bboxes.shape[0] == 0: - bboxes = np.zeros((0, 4)) - gt_bboxes.append(bboxes) - - recalls = eval_recalls( - gt_bboxes, results, proposal_nums, iou_thrs, logger=logger) - ar = recalls.mean(axis=1) - return ar - - def format_results(self, results, jsonfile_prefix=None, **kwargs): - """Format the results to json (standard format for COCO evaluation). - - Args: - results (list[tuple | numpy.ndarray]): Testing results of the - dataset. - jsonfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - - Returns: - tuple: (result_files, tmp_dir), result_files is a dict containing \ - the json filepaths, tmp_dir is the temporal directory created \ - for saving json files when jsonfile_prefix is not specified. - """ - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: {} != {}'. - format(len(results), len(self))) - - if jsonfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - jsonfile_prefix = osp.join(tmp_dir.name, 'results') - else: - tmp_dir = None - result_files = self.results2json(results, jsonfile_prefix) - return result_files, tmp_dir - - def evaluate(self, - results, - metric='bbox', - logger=None, - jsonfile_prefix=None, - classwise=False, - proposal_nums=(100, 300, 1000), - iou_thrs=None, - metric_items=None): - """Evaluation in COCO protocol. - - Args: - results (list[list | tuple]): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. Options are - 'bbox', 'segm', 'proposal', 'proposal_fast'. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - jsonfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - classwise (bool): Whether to evaluating the AP for each class. 
- proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thrs (Sequence[float], optional): IoU threshold used for - evaluating recalls/mAPs. If set to a list, the average of all - IoUs will also be computed. If not specified, [0.50, 0.55, - 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. - Default: None. - metric_items (list[str] | str, optional): Metric items that will - be returned. If not specified, ``['AR@100', 'AR@300', - 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be - used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', - 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when - ``metric=='bbox' or metric=='segm'``. - - Returns: - dict[str, float]: COCO style evaluation metric. - """ - - metrics = metric if isinstance(metric, list) else [metric] - allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] - for metric in metrics: - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - if iou_thrs is None: - iou_thrs = np.linspace( - .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) - if metric_items is not None: - if not isinstance(metric_items, list): - metric_items = [metric_items] - - result_files, tmp_dir = self.format_results(results, jsonfile_prefix) - - eval_results = OrderedDict() - cocoGt = self.coco - for metric in metrics: - msg = f'Evaluating {metric}...' - if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - if metric == 'proposal_fast': - ar = self.fast_eval_recall( - results, proposal_nums, iou_thrs, logger='silent') - log_msg = [] - for i, num in enumerate(proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') - log_msg = ''.join(log_msg) - print_log(log_msg, logger=logger) - continue - - if metric not in result_files: - raise KeyError(f'{metric} is not in results') - try: - cocoDt = cocoGt.loadRes(result_files[metric]) - except IndexError: - print_log( - 'The testing results of the whole dataset is empty.', - logger=logger, - level=logging.ERROR) - break - - iou_type = 'bbox' if metric == 'proposal' else metric - cocoEval = COCOeval(cocoGt, cocoDt, iou_type) - cocoEval.params.catIds = self.cat_ids - cocoEval.params.imgIds = self.img_ids - cocoEval.params.maxDets = list(proposal_nums) - cocoEval.params.iouThrs = iou_thrs - # mapping of cocoEval.stats - coco_metric_names = { - 'mAP': 0, - 'mAP_50': 1, - 'mAP_75': 2, - 'mAP_s': 3, - 'mAP_m': 4, - 'mAP_l': 5, - 'AR@100': 6, - 'AR@300': 7, - 'AR@1000': 8, - 'AR_s@1000': 9, - 'AR_m@1000': 10, - 'AR_l@1000': 11 - } - if metric_items is not None: - for metric_item in metric_items: - if metric_item not in coco_metric_names: - raise KeyError( - f'metric item {metric_item} is not supported') - - if metric == 'proposal': - cocoEval.params.useCats = 0 - cocoEval.evaluate() - cocoEval.accumulate() - cocoEval.summarize() - if metric_items is None: - metric_items = [ - 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', - 'AR_m@1000', 'AR_l@1000' - ] - - for item in metric_items: - val = float( - f'{cocoEval.stats[coco_metric_names[item]]:.3f}') - eval_results[item] = val - else: - cocoEval.evaluate() - cocoEval.accumulate() - cocoEval.summarize() - if classwise: # Compute per-category AP - # Compute per-category AP - # from https://github.com/facebookresearch/detectron2/ - precisions = cocoEval.eval['precision'] - # precision: (iou, recall, cls, area range, max dets) - assert len(self.cat_ids) == 
precisions.shape[2] - - results_per_category = [] - for idx, catId in enumerate(self.cat_ids): - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - nm = self.coco.loadCats(catId)[0] - precision = precisions[:, :, idx, 0, -1] - precision = precision[precision > -1] - if precision.size: - ap = np.mean(precision) - else: - ap = float('nan') - results_per_category.append( - (f'{nm["name"]}', f'{float(ap):0.3f}')) - - num_columns = min(6, len(results_per_category) * 2) - results_flatten = list( - itertools.chain(*results_per_category)) - headers = ['category', 'AP'] * (num_columns // 2) - results_2d = itertools.zip_longest(*[ - results_flatten[i::num_columns] - for i in range(num_columns) - ]) - table_data = [headers] - table_data += [result for result in results_2d] - table = AsciiTable(table_data) - print_log('\n' + table.table, logger=logger) - - if metric_items is None: - metric_items = [ - 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' - ] - - for metric_item in metric_items: - key = f'{metric}_{metric_item}' - val = float( - f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}' - ) - eval_results[key] = val - ap = cocoEval.stats[:6] - eval_results[f'{metric}_mAP_copypaste'] = ( - f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' - f'{ap[4]:.3f} {ap[5]:.3f}') - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results diff --git a/spaces/Catmeow/AI_story_writing/README.md b/spaces/Catmeow/AI_story_writing/README.md deleted file mode 100644 index 19b74e4ff2db84fe4b770f4fe0109040b1b01fe0..0000000000000000000000000000000000000000 --- a/spaces/Catmeow/AI_story_writing/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AI Story Writing -emoji: 📚 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.8 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/file_operations.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/file_operations.py deleted file mode 100644 index ad145ec956dd9dafd39e09c2244d001cf5febd2f..0000000000000000000000000000000000000000 --- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/file_operations.py +++ /dev/null @@ -1,267 +0,0 @@ -"""File operations for AutoGPT""" -from __future__ import annotations - -import os -import os.path -from typing import Generator - -import requests -from colorama import Back, Fore -from requests.adapters import HTTPAdapter, Retry - -from autogpt.spinner import Spinner -from autogpt.utils import readable_file_size -from autogpt.workspace import WORKSPACE_PATH, path_in_workspace - -LOG_FILE = "file_logger.txt" -LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE - - -def check_duplicate_operation(operation: str, filename: str) -> bool: - """Check if the operation has already been performed on the given file - - Args: - operation (str): The operation to check for - filename (str): The name of the file to check for - - Returns: - bool: True if the operation has already been performed on the file - """ - log_content = read_file(LOG_FILE) - log_entry = f"{operation}: {filename}\n" - return log_entry in log_content - - -def log_operation(operation: str, filename: str) -> None: - """Log the file operation to the file_logger.txt - - Args: - operation (str): The operation to log - filename (str): The name of the file the operation was performed on - """ - log_entry = f"{operation}: {filename}\n" - - # Create the log file if it doesn't exist - if not 
os.path.exists(LOG_FILE_PATH): - with open(LOG_FILE_PATH, "w", encoding="utf-8") as f: - f.write("File Operation Logger ") - - append_to_file(LOG_FILE, log_entry, shouldLog=False) - - -def split_file( - content: str, max_length: int = 4000, overlap: int = 0 -) -> Generator[str, None, None]: - """ - Split text into chunks of a specified maximum length with a specified overlap - between chunks. - - :param content: The input text to be split into chunks - :param max_length: The maximum length of each chunk, - default is 4000 (about 1k token) - :param overlap: The number of overlapping characters between chunks, - default is no overlap - :return: A generator yielding chunks of text - """ - start = 0 - content_length = len(content) - - while start < content_length: - end = start + max_length - if end + overlap < content_length: - chunk = content[start : end + overlap - 1] - else: - chunk = content[start:content_length] - - # Account for the case where the last chunk is shorter than the overlap, so it has already been consumed - if len(chunk) <= overlap: - break - - yield chunk - start += max_length - overlap - - -def read_file(filename: str) -> str: - """Read a file and return the contents - - Args: - filename (str): The name of the file to read - - Returns: - str: The contents of the file - """ - try: - filepath = path_in_workspace(filename) - with open(filepath, "r", encoding="utf-8") as f: - content = f.read() - return content - except Exception as e: - return f"Error: {str(e)}" - - -def ingest_file( - filename: str, memory, max_length: int = 4000, overlap: int = 200 -) -> None: - """ - Ingest a file by reading its content, splitting it into chunks with a specified - maximum length and overlap, and adding the chunks to the memory storage. - - :param filename: The name of the file to ingest - :param memory: An object with an add() method to store the chunks in memory - :param max_length: The maximum length of each chunk, default is 4000 - :param overlap: The number of overlapping characters between chunks, default is 200 - """ - try: - print(f"Working with file {filename}") - content = read_file(filename) - content_length = len(content) - print(f"File length: {content_length} characters") - - chunks = list(split_file(content, max_length=max_length, overlap=overlap)) - - num_chunks = len(chunks) - for i, chunk in enumerate(chunks): - print(f"Ingesting chunk {i + 1} / {num_chunks} into memory") - memory_to_add = ( - f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}" - ) - - memory.add(memory_to_add) - - print(f"Done ingesting {num_chunks} chunks from {filename}.") - except Exception as e: - print(f"Error while ingesting file '{filename}': {str(e)}") - - -def write_to_file(filename: str, text: str) -> str: - """Write text to a file - - Args: - filename (str): The name of the file to write to - text (str): The text to write to the file - - Returns: - str: A message indicating success or failure - """ - if check_duplicate_operation("write", filename): - return "Error: File has already been updated." - try: - filepath = path_in_workspace(filename) - directory = os.path.dirname(filepath) - if not os.path.exists(directory): - os.makedirs(directory) - with open(filepath, "w", encoding="utf-8") as f: - f.write(text) - log_operation("write", filename) - return "File written to successfully." 
- except Exception as e: - return f"Error: {str(e)}" - - -def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str: - """Append text to a file - - Args: - filename (str): The name of the file to append to - text (str): The text to append to the file - - Returns: - str: A message indicating success or failure - """ - try: - filepath = path_in_workspace(filename) - with open(filepath, "a") as f: - f.write(text) - - if shouldLog: - log_operation("append", filename) - - return "Text appended successfully." - except Exception as e: - return f"Error: {str(e)}" - - -def delete_file(filename: str) -> str: - """Delete a file - - Args: - filename (str): The name of the file to delete - - Returns: - str: A message indicating success or failure - """ - if check_duplicate_operation("delete", filename): - return "Error: File has already been deleted." - try: - filepath = path_in_workspace(filename) - os.remove(filepath) - log_operation("delete", filename) - return "File deleted successfully." - except Exception as e: - return f"Error: {str(e)}" - - -def search_files(directory: str) -> list[str]: - """Search for files in a directory - - Args: - directory (str): The directory to search in - - Returns: - list[str]: A list of files found in the directory - """ - found_files = [] - - if directory in {"", "/"}: - search_directory = WORKSPACE_PATH - else: - search_directory = path_in_workspace(directory) - - for root, _, files in os.walk(search_directory): - for file in files: - if file.startswith("."): - continue - relative_path = os.path.relpath(os.path.join(root, file), WORKSPACE_PATH) - found_files.append(relative_path) - - return found_files - - -def download_file(url, filename): - """Downloads a file - Args: - url (str): URL of the file to download - filename (str): Filename to save the file as - """ - safe_filename = path_in_workspace(filename) - try: - message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}" - with Spinner(message) as spinner: - session = requests.Session() - retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504]) - adapter = HTTPAdapter(max_retries=retry) - session.mount("http://", adapter) - session.mount("https://", adapter) - - total_size = 0 - downloaded_size = 0 - - with session.get(url, allow_redirects=True, stream=True) as r: - r.raise_for_status() - total_size = int(r.headers.get("Content-Length", 0)) - downloaded_size = 0 - - with open(safe_filename, "wb") as f: - for chunk in r.iter_content(chunk_size=8192): - f.write(chunk) - downloaded_size += len(chunk) - - # Update the progress message - progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}" - spinner.update_message(f"{message} {progress}") - - return f'Successfully downloaded and locally stored file: "{filename}"! 
(Size: {readable_file_size(total_size)})'
-    except requests.HTTPError as e:
-        return f"Got an HTTP Error whilst trying to download file: {e}"
-    except Exception as e:
-        return "Error: " + str(e)
diff --git a/spaces/CofAI/chat.b4/client/css/message-input.css b/spaces/CofAI/chat.b4/client/css/message-input.css
deleted file mode 100644
index de5f58388133bd3b2b2333dd99cecf0110002367..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat.b4/client/css/message-input.css
+++ /dev/null
@@ -1,27 +0,0 @@
-#message-input {
-    margin-right: 30px;
-    height: 64px;
-}
-
-#message-input::-webkit-scrollbar {
-    width: 5px;
-}
-
-#message-input::-webkit-scrollbar-track {
-    background: #f1f1f1;
-}
-
-#message-input::-webkit-scrollbar-thumb {
-    background: #c7a2ff;
-}
-
-#message-input::-webkit-scrollbar-thumb:hover {
-    background: #8b3dff;
-}
-
-@media screen and (max-width: 360px) {
-    #message-input {
-        margin: 0;
-    }
-}
-
diff --git a/spaces/CofAI/chat.b4/g4f/Provider/Providers/helpers/gpt4love.py b/spaces/CofAI/chat.b4/g4f/Provider/Providers/helpers/gpt4love.py
deleted file mode 100644
index 987fdbf8de5c27f7b827183d9c192dcf48d8ddcf..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat.b4/g4f/Provider/Providers/helpers/gpt4love.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import json
-import sys
-from re import findall
-from curl_cffi import requests
-
-config = json.loads(sys.argv[1])
-prompt = config['messages'][-1]['content']
-
-headers = {
-    'authority': 'api.gptplus.one',
-    'accept': 'application/json, text/plain, */*',
-    'accept-language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4',
-    'content-type': 'application/octet-stream',
-    'origin': 'https://ai.gptforlove.com/',
-    'referer': 'https://ai.gptforlove.com/',
-    'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
-    'sec-ch-ua-mobile': '?0',
-    'sec-ch-ua-platform': '"macOS"',
-    'sec-fetch-dest': 'empty',
-    'sec-fetch-mode': 'cors',
-    'sec-fetch-site': 'cross-site',
-    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
-}
-
-json_data = {
-    'prompt': prompt,
-    'options': {}
-}
-
-def format(chunk):
-    try:
-        completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
-        print(completion_chunk, flush=True, end='')
-
-    except Exception as e:
-        print(f'[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]', flush=True)
-        return
-
-while True:
-    try:
-        response = requests.post('https://api.gptplus.one/api/chat-process',
-                                 headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
-
-        exit(0)
-
-    except Exception as e:
-        print('[ERROR] an error occurred, retrying... 
|', e, flush=True) - continue \ No newline at end of file diff --git a/spaces/Cran-May/SEA-Streamlit/README.md b/spaces/Cran-May/SEA-Streamlit/README.md deleted file mode 100644 index ade259caccd5f8f16064553261e0812436a18cf2..0000000000000000000000000000000000000000 --- a/spaces/Cran-May/SEA-Streamlit/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: 兮辞·析辞-常明 -emoji: 💻 -colorFrom: indigo -colorTo: pink -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: true -models: -- Cran-May/OpenSLIDE ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/version.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/version.py deleted file mode 100644 index 5eed49a42ab22c53962c27e750f24ca0b63153d4..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/version.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -Expose version -""" - -__version__ = "3.2.0" -VERSION = __version__.split(".") diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/__init__.py deleted file mode 100644 index 742bc64ce037a53a765efc80ed773b840af5b4c7..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/__init__.py +++ /dev/null @@ -1,61 +0,0 @@ -from fontTools.pens.transformPen import TransformPen -from fontTools.misc import etree -from fontTools.misc.textTools import tostr -from .parser import parse_path -from .shapes import PathBuilder - - -__all__ = [tostr(s) for s in ("SVGPath", "parse_path")] - - -class SVGPath(object): - """Parse SVG ``path`` elements from a file or string, and draw them - onto a glyph object that supports the FontTools Pen protocol. - - For example, reading from an SVG file and drawing to a Defcon Glyph: - - import defcon - glyph = defcon.Glyph() - pen = glyph.getPen() - svg = SVGPath("path/to/a.svg") - svg.draw(pen) - - Or reading from a string containing SVG data, using the alternative - 'fromstring' (a class method): - - data = ' None: - self._stream = stream - - def __iter__(self) -> Iterator[bytes]: - yield self._stream - - async def __aiter__(self) -> AsyncIterator[bytes]: - yield self._stream - - -class IteratorByteStream(SyncByteStream): - CHUNK_SIZE = 65_536 - - def __init__(self, stream: Iterable[bytes]): - self._stream = stream - self._is_stream_consumed = False - self._is_generator = inspect.isgenerator(stream) - - def __iter__(self) -> Iterator[bytes]: - if self._is_stream_consumed and self._is_generator: - raise StreamConsumed() - - self._is_stream_consumed = True - if hasattr(self._stream, "read"): - # File-like interfaces should use 'read' directly. - chunk = self._stream.read(self.CHUNK_SIZE) - while chunk: - yield chunk - chunk = self._stream.read(self.CHUNK_SIZE) - else: - # Otherwise iterate. 
- for part in self._stream: - yield part - - -class AsyncIteratorByteStream(AsyncByteStream): - CHUNK_SIZE = 65_536 - - def __init__(self, stream: AsyncIterable[bytes]): - self._stream = stream - self._is_stream_consumed = False - self._is_generator = inspect.isasyncgen(stream) - - async def __aiter__(self) -> AsyncIterator[bytes]: - if self._is_stream_consumed and self._is_generator: - raise StreamConsumed() - - self._is_stream_consumed = True - if hasattr(self._stream, "aread"): - # File-like interfaces should use 'aread' directly. - chunk = await self._stream.aread(self.CHUNK_SIZE) - while chunk: - yield chunk - chunk = await self._stream.aread(self.CHUNK_SIZE) - else: - # Otherwise iterate. - async for part in self._stream: - yield part - - -class UnattachedStream(AsyncByteStream, SyncByteStream): - """ - If a request or response is serialized using pickle, then it is no longer - attached to a stream for I/O purposes. Any stream operations should result - in `httpx.StreamClosed`. - """ - - def __iter__(self) -> Iterator[bytes]: - raise StreamClosed() - - async def __aiter__(self) -> AsyncIterator[bytes]: - raise StreamClosed() - yield b"" # pragma: no cover - - -def encode_content( - content: Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]] -) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]: - if isinstance(content, (bytes, str)): - body = content.encode("utf-8") if isinstance(content, str) else content - content_length = len(body) - headers = {"Content-Length": str(content_length)} if body else {} - return headers, ByteStream(body) - - elif isinstance(content, Iterable) and not isinstance(content, dict): - # `not isinstance(content, dict)` is a bit oddly specific, but it - # catches a case that's easy for users to make in error, and would - # otherwise pass through here, like any other bytes-iterable, - # because `dict` happens to be iterable. See issue #2491. 
- content_length_or_none = peek_filelike_length(content) - - if content_length_or_none is None: - headers = {"Transfer-Encoding": "chunked"} - else: - headers = {"Content-Length": str(content_length_or_none)} - return headers, IteratorByteStream(content) # type: ignore - - elif isinstance(content, AsyncIterable): - headers = {"Transfer-Encoding": "chunked"} - return headers, AsyncIteratorByteStream(content) - - raise TypeError(f"Unexpected type for 'content', {type(content)!r}") - - -def encode_urlencoded_data( - data: RequestData, -) -> Tuple[Dict[str, str], ByteStream]: - plain_data = [] - for key, value in data.items(): - if isinstance(value, (list, tuple)): - plain_data.extend([(key, primitive_value_to_str(item)) for item in value]) - else: - plain_data.append((key, primitive_value_to_str(value))) - body = urlencode(plain_data, doseq=True).encode("utf-8") - content_length = str(len(body)) - content_type = "application/x-www-form-urlencoded" - headers = {"Content-Length": content_length, "Content-Type": content_type} - return headers, ByteStream(body) - - -def encode_multipart_data( - data: RequestData, files: RequestFiles, boundary: Optional[bytes] -) -> Tuple[Dict[str, str], MultipartStream]: - multipart = MultipartStream(data=data, files=files, boundary=boundary) - headers = multipart.get_headers() - return headers, multipart - - -def encode_text(text: str) -> Tuple[Dict[str, str], ByteStream]: - body = text.encode("utf-8") - content_length = str(len(body)) - content_type = "text/plain; charset=utf-8" - headers = {"Content-Length": content_length, "Content-Type": content_type} - return headers, ByteStream(body) - - -def encode_html(html: str) -> Tuple[Dict[str, str], ByteStream]: - body = html.encode("utf-8") - content_length = str(len(body)) - content_type = "text/html; charset=utf-8" - headers = {"Content-Length": content_length, "Content-Type": content_type} - return headers, ByteStream(body) - - -def encode_json(json: Any) -> Tuple[Dict[str, str], ByteStream]: - body = json_dumps(json).encode("utf-8") - content_length = str(len(body)) - content_type = "application/json" - headers = {"Content-Length": content_length, "Content-Type": content_type} - return headers, ByteStream(body) - - -def encode_request( - content: Optional[RequestContent] = None, - data: Optional[RequestData] = None, - files: Optional[RequestFiles] = None, - json: Optional[Any] = None, - boundary: Optional[bytes] = None, -) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]: - """ - Handles encoding the given `content`, `data`, `files`, and `json`, - returning a two-tuple of (, ). - """ - if data is not None and not isinstance(data, Mapping): - # We prefer to separate `content=` - # for raw request content, and `data=
` for url encoded or - # multipart form content. - # - # However for compat with requests, we *do* still support - # `data=` usages. We deal with that case here, treating it - # as if `content=<...>` had been supplied instead. - message = "Use 'content=<...>' to upload raw bytes/text content." - warnings.warn(message, DeprecationWarning) - return encode_content(data) - - if content is not None: - return encode_content(content) - elif files: - return encode_multipart_data(data or {}, files, boundary) - elif data: - return encode_urlencoded_data(data) - elif json is not None: - return encode_json(json) - - return {}, ByteStream(b"") - - -def encode_response( - content: Optional[ResponseContent] = None, - text: Optional[str] = None, - html: Optional[str] = None, - json: Optional[Any] = None, -) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]: - """ - Handles encoding the given `content`, returning a two-tuple of - (, ). - """ - if content is not None: - return encode_content(content) - elif text is not None: - return encode_text(text) - elif html is not None: - return encode_html(html) - elif json is not None: - return encode_json(json) - - return {}, ByteStream(b"") diff --git "a/spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/pages/03_\360\237\223\235_Upload_Video_File_and_Transcript.py" "b/spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/pages/03_\360\237\223\235_Upload_Video_File_and_Transcript.py" deleted file mode 100644 index 4bce00d5282f5392258bd9b2b6df56607a4810aa..0000000000000000000000000000000000000000 --- "a/spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/pages/03_\360\237\223\235_Upload_Video_File_and_Transcript.py" +++ /dev/null @@ -1,130 +0,0 @@ -import streamlit as st -from streamlit_lottie import st_lottie -from utils import write_vtt, write_srt -import ffmpeg -import requests -from typing import Iterator -from io import StringIO -import numpy as np -import pathlib -import os - - -st.set_page_config(page_title="Auto Subtitled Video Generator", page_icon=":movie_camera:", layout="wide") - -# Define a function that we can use to load lottie files from a link. -@st.cache(allow_output_mutation=True) -def load_lottieurl(url: str): - r = requests.get(url) - if r.status_code != 200: - return None - return r.json() - - -APP_DIR = pathlib.Path(__file__).parent.absolute() - -LOCAL_DIR = APP_DIR / "local_transcript" -LOCAL_DIR.mkdir(exist_ok=True) -save_dir = LOCAL_DIR / "output" -save_dir.mkdir(exist_ok=True) - - -col1, col2 = st.columns([1, 3]) -with col1: - lottie = load_lottieurl("https://assets6.lottiefiles.com/packages/lf20_cjnxwrkt.json") - st_lottie(lottie) - -with col2: - st.write(""" - ## Auto Subtitled Video Generator - ##### ➠ Upload a video file and a transcript as .srt or .vtt file and get a video with subtitles. - ##### ➠ Processing time will increase as the video length increases. 
""") - - -def getSubs(segments: Iterator[dict], format: str, maxLineWidth: int) -> str: - segmentStream = StringIO() - - if format == 'vtt': - write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth) - elif format == 'srt': - write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth) - else: - raise Exception("Unknown format " + format) - - segmentStream.seek(0) - return segmentStream.read() - - -def split_video_audio(uploaded_file): - with open(f"{save_dir}/input.mp4", "wb") as f: - f.write(uploaded_file.read()) - audio = ffmpeg.input(f"{save_dir}/input.mp4") - audio = ffmpeg.output(audio, f"{save_dir}/output.wav", acodec="pcm_s16le", ac=1, ar="16k") - ffmpeg.run(audio, overwrite_output=True) - - -def main(): - uploaded_video = st.file_uploader("Upload Video File", type=["mp4", "avi", "mov", "mkv"]) - # get the name of the input_file - if uploaded_video is not None: - filename = uploaded_video.name[:-4] - else: - filename = None - transcript_file = st.file_uploader("Upload Transcript File", type=["srt", "vtt"]) - if transcript_file is not None: - transcript_name = transcript_file.name - else: - transcript_name = None - if uploaded_video is not None and transcript_file is not None: - if transcript_name[-3:] == "vtt": - with open("uploaded_transcript.vtt", "wb") as f: - f.writelines(transcript_file) - f.close() - with open(os.path.join(os.getcwd(), "uploaded_transcript.vtt"), "rb") as f: - vtt_file = f.read() - if st.button("Generate Video with Subtitles"): - with st.spinner("Generating Subtitled Video"): - split_video_audio(uploaded_video) - video_file = ffmpeg.input(f"{save_dir}/input.mp4") - audio_file = ffmpeg.input(f"{save_dir}/output.wav") - ffmpeg.concat(video_file.filter("subtitles", "uploaded_transcript.vtt"), audio_file, v=1, a=1).output("final.mp4").global_args('-report').run(quiet=True, overwrite_output=True) - video_with_subs = open("final.mp4", "rb") - col3, col4 = st.columns(2) - with col3: - st.video(uploaded_video) - with col4: - st.video(video_with_subs) - st.download_button(label="Download Video with Subtitles", - data=video_with_subs, - file_name=f"{filename}_with_subs.mp4") - - elif transcript_name[-3:] == "srt": - with open("uploaded_transcript.srt", "wb") as f: - f.writelines(transcript_file) - f.close() - with open(os.path.join(os.getcwd(), "uploaded_transcript.srt"), "rb") as f: - srt_file = f.read() - if st.button("Generate Video with Subtitles"): - with st.spinner("Generating Subtitled Video"): - split_video_audio(uploaded_video) - video_file = ffmpeg.input(f"{save_dir}/input.mp4") - audio_file = ffmpeg.input(f"{save_dir}/output.wav") - ffmpeg.concat(video_file.filter("subtitles", "uploaded_transcript.srt"), audio_file, v=1, a=1).output("final.mp4").run(quiet=True, overwrite_output=True) - video_with_subs = open("final.mp4", "rb") - col3, col4 = st.columns(2) - with col3: - st.video(uploaded_video) - with col4: - st.video(video_with_subs) - st.download_button(label="Download Video with Subtitles", - data=video_with_subs, - file_name=f"{filename}_with_subs.mp4") - else: - st.error("Please upload a .srt or .vtt file") - else: - st.info("Please upload a video file and a transcript file") - - -if __name__ == "__main__": - main() - diff --git a/spaces/Dinoking/Guccio-AI-Designer/netdissect/autoeval.py b/spaces/Dinoking/Guccio-AI-Designer/netdissect/autoeval.py deleted file mode 100644 index ecc86a1f7b403f57821dde2a2b4f0619c0d6cae3..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/netdissect/autoeval.py +++ /dev/null @@ 
-1,37 +0,0 @@ -from collections import defaultdict -from importlib import import_module - -def autoimport_eval(term): - ''' - Used to evaluate an arbitrary command-line constructor specifying - a class, with automatic import of global module names. - ''' - - class DictNamespace(object): - def __init__(self, d): - self.__d__ = d - def __getattr__(self, key): - return self.__d__[key] - - class AutoImportDict(defaultdict): - def __init__(self, wrapped=None, parent=None): - super().__init__() - self.wrapped = wrapped - self.parent = parent - def __missing__(self, key): - if self.wrapped is not None: - if key in self.wrapped: - return self.wrapped[key] - if self.parent is not None: - key = self.parent + '.' + key - if key in __builtins__: - return __builtins__[key] - mdl = import_module(key) - # Return an AutoImportDict for any namespace packages - if hasattr(mdl, '__path__'): # and not hasattr(mdl, '__file__'): - return DictNamespace( - AutoImportDict(wrapped=mdl.__dict__, parent=key)) - return mdl - - return eval(term, {}, AutoImportDict()) - diff --git a/spaces/Dodero1305/Heart-Disease-Chatbot/prepare_retrieval.py b/spaces/Dodero1305/Heart-Disease-Chatbot/prepare_retrieval.py deleted file mode 100644 index 397419ee250675ebf47d8b35acd9386eb61b05f9..0000000000000000000000000000000000000000 --- a/spaces/Dodero1305/Heart-Disease-Chatbot/prepare_retrieval.py +++ /dev/null @@ -1,59 +0,0 @@ -import shutil - -import rank_bm25 -import os - -from rank_bm25 import BM25Okapi - - -def get_bm25_scores(question): - - file_names = [] - corpus = [] - for file_name in os.listdir('./data'): - with open(f'./data/{file_name}', 'r', encoding='cp437') as f: - doc = f.readlines() - - file_names.append(" ".join(file_name.split("-"))) - corpus.append(" ".join(doc)) - - # import matplotlib.pyplot as plt - # plt.hist([len(doc.split()) for doc in corpus], bins=128, range=(0, 5000)) - # plt.show() - - titles = file_names - - words = [ - [word for word in doc.split()] - for doc in titles - ] - bm25 = BM25Okapi(words) - indexs = list(range(len(words))) - - tokenized_query = question.split() - - a = bm25.get_top_n(tokenized_query, indexs, n=5) - folder_path = 'retrieval' - - # Check whether the directory exists - if os.path.exists(folder_path): - # Delete every file in the directory - for filename in os.listdir(folder_path): - file_path = os.path.join(folder_path, filename) - try: - if os.path.isfile(file_path) or os.path.islink(file_path): - os.unlink(file_path) - elif os.path.isdir(file_path): - shutil.rmtree(file_path) - except Exception as e: - print('Failed to delete %s. Reason: %s' % (file_path, e)) - else: - # If the directory does not exist, create it - os.makedirs(folder_path) - - # Save the new files - for i in range(len(a)): - file_path = os.path.join(folder_path, titles[a[i]]) - - with open(file_path, 'w', encoding='utf-8') as file: - file.write(corpus[a[i]]) \ No newline at end of file
diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/__init__.py b/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/DragGan/DragGan/stylegan_human/training_scripts/sg3/training/networks_stylegan2.py b/spaces/DragGan/DragGan/stylegan_human/training_scripts/sg3/training/networks_stylegan2.py deleted file mode 100644 index cb50599d59a5e49276b60f516b6aac0954bf6da3..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/training_scripts/sg3/training/networks_stylegan2.py +++ /dev/null @@ -1,847 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Network architectures from the paper -"Analyzing and Improving the Image Quality of StyleGAN". -Matches the original implementation of configs E-F by Karras et al. at -https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py""" - -import numpy as np -import torch -from torch_utils import misc -from torch_utils import persistence -from torch_utils.ops import conv2d_resample -from torch_utils.ops import upfirdn2d -from torch_utils.ops import bias_act -from torch_utils.ops import fma - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def normalize_2nd_moment(x, dim=1, eps=1e-8): - return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt() - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def modulated_conv2d( - x, # Input tensor of shape [batch_size, in_channels, in_height, in_width]. - weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width]. - styles, # Modulation coefficients of shape [batch_size, in_channels]. - noise = None, # Optional noise tensor to add to the output activations. - up = 1, # Integer upsampling factor. - down = 1, # Integer downsampling factor. - padding = 0, # Padding with respect to the upsampled image. - resample_filter = None, # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter(). - demodulate = True, # Apply weight demodulation? - flip_weight = True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d). - fused_modconv = True, # Perform modulation, convolution, and demodulation as a single fused operation?
-): - batch_size = x.shape[0] - out_channels, in_channels, kh, kw = weight.shape - misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk] - misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] - misc.assert_shape(styles, [batch_size, in_channels]) # [NI] - - # Pre-normalize inputs to avoid FP16 overflow. - if x.dtype == torch.float16 and demodulate: - weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk - styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I - - # Calculate per-sample weights and demodulation coefficients. - w = None - dcoefs = None - if demodulate or fused_modconv: - w = weight.unsqueeze(0) # [NOIkk] - w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk] - if demodulate: - dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO] - if demodulate and fused_modconv: - w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk] - - # Execute by scaling the activations before and after the convolution. - if not fused_modconv: - x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1) - x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight) - if demodulate and noise is not None: - x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype)) - elif demodulate: - x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1) - elif noise is not None: - x = x.add_(noise.to(x.dtype)) - return x - - # Execute as one fused op using grouped convolution. - with misc.suppress_tracer_warnings(): # this value will be treated as a constant - batch_size = int(batch_size) - misc.assert_shape(x, [batch_size, in_channels, None, None]) - x = x.reshape(1, -1, *x.shape[2:]) - w = w.reshape(-1, in_channels, kh, kw) - x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight) - x = x.reshape(batch_size, -1, *x.shape[2:]) - if noise is not None: - x = x.add_(noise) - return x - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class FullyConnectedLayer(torch.nn.Module): - def __init__(self, - in_features, # Number of input features. - out_features, # Number of output features. - bias = True, # Apply additive bias before the activation function? - activation = 'linear', # Activation function: 'relu', 'lrelu', etc. - lr_multiplier = 1, # Learning rate multiplier. - bias_init = 0, # Initial value for the additive bias. 
- ): - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.activation = activation - self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier) - self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None - self.weight_gain = lr_multiplier / np.sqrt(in_features) - self.bias_gain = lr_multiplier - - def forward(self, x): - w = self.weight.to(x.dtype) * self.weight_gain - b = self.bias - if b is not None: - b = b.to(x.dtype) - if self.bias_gain != 1: - b = b * self.bias_gain - - if self.activation == 'linear' and b is not None: - x = torch.addmm(b.unsqueeze(0), x, w.t()) - else: - x = x.matmul(w.t()) - x = bias_act.bias_act(x, b, act=self.activation) - return x - - def extra_repr(self): - return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class Conv2dLayer(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - out_channels, # Number of output channels. - kernel_size, # Width and height of the convolution kernel. - bias = True, # Apply additive bias before the activation function? - activation = 'linear', # Activation function: 'relu', 'lrelu', etc. - up = 1, # Integer upsampling factor. - down = 1, # Integer downsampling factor. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = None, # Clamp the output to +-X, None = disable clamping. - channels_last = False, # Expect the input to have memory_format=channels_last? - trainable = True, # Update the weights of this layer during training? - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.activation = activation - self.up = up - self.down = down - self.conv_clamp = conv_clamp - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.padding = kernel_size // 2 - self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) - self.act_gain = bias_act.activation_funcs[activation].def_gain - - memory_format = torch.channels_last if channels_last else torch.contiguous_format - weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format) - bias = torch.zeros([out_channels]) if bias else None - if trainable: - self.weight = torch.nn.Parameter(weight) - self.bias = torch.nn.Parameter(bias) if bias is not None else None - else: - self.register_buffer('weight', weight) - if bias is not None: - self.register_buffer('bias', bias) - else: - self.bias = None - - def forward(self, x, gain=1): - w = self.weight * self.weight_gain - b = self.bias.to(x.dtype) if self.bias is not None else None - flip_weight = (self.up == 1) # slightly faster - x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight) - - act_gain = self.act_gain * gain - act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None - x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp) - return x - - def extra_repr(self): - return ' '.join([ - f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},', - f'up={self.up}, down={self.down}']) - -#---------------------------------------------------------------------------- 
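For intuition, the core of `modulated_conv2d` defined above can be written out in a few lines of plain PyTorch. The following is a sketch of the fused path only, under simplifying assumptions (no up/down-sampling, no FP16 pre-normalization, no noise), with a hypothetical name `modulated_conv2d_sketch`; it is an illustration, not a drop-in replacement:

```
import torch
import torch.nn.functional as F

def modulated_conv2d_sketch(x, weight, styles, demodulate=True, eps=1e-8):
    # x: [N, I, H, W], weight: [O, I, kh, kw], styles: [N, I]
    N = x.shape[0]
    O, I, kh, kw = weight.shape
    # Modulate: scale each input channel of the kernel per sample.
    w = weight.unsqueeze(0) * styles.reshape(N, 1, I, 1, 1)      # [N, O, I, kh, kw]
    if demodulate:
        # Demodulate: rescale so each output feature map has ~unit variance.
        dcoefs = (w.square().sum(dim=[2, 3, 4]) + eps).rsqrt()   # [N, O]
        w = w * dcoefs.reshape(N, O, 1, 1, 1)
    # Fold the batch into convolution groups so a single conv call applies
    # a different modulated kernel to every sample.
    x = x.reshape(1, N * I, *x.shape[2:])
    w = w.reshape(N * O, I, kh, kw)
    out = F.conv2d(x, w, padding=kh // 2, groups=N)
    return out.reshape(N, O, *out.shape[2:])

x = torch.randn(2, 8, 16, 16)
weight = torch.randn(4, 8, 3, 3)
styles = torch.rand(2, 8) + 0.5
print(modulated_conv2d_sketch(x, weight, styles).shape)  # torch.Size([2, 4, 16, 16])
```

The grouped-convolution trick is the same one the fused branch above uses; the non-fused branch instead scales the activations before and after an ordinary convolution, which is mathematically equivalent once the demodulation coefficients are applied.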
- -@persistence.persistent_class -class MappingNetwork(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality, 0 = no latent. - c_dim, # Conditioning label (C) dimensionality, 0 = no label. - w_dim, # Intermediate latent (W) dimensionality. - num_ws, # Number of intermediate latents to output, None = do not broadcast. - num_layers = 8, # Number of mapping layers. - embed_features = None, # Label embedding dimensionality, None = same as w_dim. - layer_features = None, # Number of intermediate features in the mapping layers, None = same as w_dim. - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. - lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers. - w_avg_beta = 0.998, # Decay for tracking the moving average of W during training, None = do not track. - ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.num_ws = num_ws - self.num_layers = num_layers - self.w_avg_beta = w_avg_beta - - if embed_features is None: - embed_features = w_dim - if c_dim == 0: - embed_features = 0 - if layer_features is None: - layer_features = w_dim - features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim] - - if c_dim > 0: - self.embed = FullyConnectedLayer(c_dim, embed_features) - for idx in range(num_layers): - in_features = features_list[idx] - out_features = features_list[idx + 1] - layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier) - setattr(self, f'fc{idx}', layer) - - if num_ws is not None and w_avg_beta is not None: - self.register_buffer('w_avg', torch.zeros([w_dim])) - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): - # Embed, normalize, and concat inputs. - x = None - with torch.autograd.profiler.record_function('input'): - if self.z_dim > 0: - misc.assert_shape(z, [None, self.z_dim]) - x = normalize_2nd_moment(z.to(torch.float32)) - if self.c_dim > 0: - misc.assert_shape(c, [None, self.c_dim]) - y = normalize_2nd_moment(self.embed(c.to(torch.float32))) - x = torch.cat([x, y], dim=1) if x is not None else y - - # Main layers. - for idx in range(self.num_layers): - layer = getattr(self, f'fc{idx}') - x = layer(x) - - # Update moving average of W. - if update_emas and self.w_avg_beta is not None: - with torch.autograd.profiler.record_function('update_w_avg'): - self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)) - - # Broadcast. - if self.num_ws is not None: - with torch.autograd.profiler.record_function('broadcast'): - x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) - - # Apply truncation. - if truncation_psi != 1: - with torch.autograd.profiler.record_function('truncate'): - assert self.w_avg_beta is not None - if self.num_ws is None or truncation_cutoff is None: - x = self.w_avg.lerp(x, truncation_psi) - else: - x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi) - return x - - def extra_repr(self): - return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisLayer(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - out_channels, # Number of output channels. - w_dim, # Intermediate latent (W) dimensionality. - resolution, # Resolution of this layer. - kernel_size = 3, # Convolution kernel size. 
- up = 1, # Integer upsampling factor. - use_noise = True, # Enable noise input? - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. - channels_last = False, # Use channels_last format for the weights? - square = False, # default if for rectangle images - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.w_dim = w_dim - self.resolution = resolution - self.up = up - self.use_noise = use_noise - self.activation = activation - self.conv_clamp = conv_clamp - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.padding = kernel_size // 2 - self.act_gain = bias_act.activation_funcs[activation].def_gain - self.square=square - - self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1) - memory_format = torch.channels_last if channels_last else torch.contiguous_format - self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)) - if use_noise: - if self.square: - self.register_buffer('noise_const', torch.randn([resolution, resolution])) - else: - self.register_buffer('noise_const', torch.randn([resolution, resolution // 2])) - self.noise_strength = torch.nn.Parameter(torch.zeros([])) - self.bias = torch.nn.Parameter(torch.zeros([out_channels])) - - def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1): - assert noise_mode in ['random', 'const', 'none'] - in_resolution = self.resolution // self.up - if self.square: - misc.assert_shape(x, [None, self.weight.shape[1], in_resolution, in_resolution]) - else: - misc.assert_shape(x, [None, self.weight.shape[1], in_resolution, in_resolution // 2]) - styles = self.affine(w) - - noise = None - if self.use_noise and noise_mode == 'random': - if self.square: - noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength - else: - noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution // 2], device=x.device) * self.noise_strength - if self.use_noise and noise_mode == 'const': - noise = self.noise_const * self.noise_strength - - flip_weight = (self.up == 1) # slightly faster - x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up, - padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv) - - act_gain = self.act_gain * gain - act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None - x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp) - return x - - def extra_repr(self): - return ' '.join([ - f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},', - f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}']) - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class ToRGBLayer(torch.nn.Module): - def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.w_dim = w_dim - self.conv_clamp = conv_clamp - self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1) - memory_format = torch.channels_last if 
channels_last else torch.contiguous_format - self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)) - self.bias = torch.nn.Parameter(torch.zeros([out_channels])) - self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) - - def forward(self, x, w, fused_modconv=True): - styles = self.affine(w) * self.weight_gain - x = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv) - x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp) - return x - - def extra_repr(self): - return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisBlock(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels, 0 = first block. - out_channels, # Number of output channels. - w_dim, # Intermediate latent (W) dimensionality. - resolution, # Resolution of this block. - img_channels, # Number of output color channels. - is_last, # Is this the last block? - architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping. - use_fp16 = False, # Use FP16 for this block? - fp16_channels_last = False, # Use channels-last memory format with FP16? - square = False, # default is for rectangle images - fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training. - **layer_kwargs, # Arguments for SynthesisLayer. 
- ): - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.w_dim = w_dim - self.resolution = resolution - self.img_channels = img_channels - self.is_last = is_last - self.architecture = architecture - self.use_fp16 = use_fp16 - self.channels_last = (use_fp16 and fp16_channels_last) - self.fused_modconv_default = fused_modconv_default - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.num_conv = 0 - self.num_torgb = 0 - self.square = square - - if in_channels == 0: - if self.square: - self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution])) - else: # rectangle - self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution // 2])) - - if in_channels != 0: - self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2, - resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs) - self.num_conv += 1 - - self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution, - conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs) - self.num_conv += 1 - - if is_last or architecture == 'skip': - self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim, - conv_clamp=conv_clamp, channels_last=self.channels_last) - self.num_torgb += 1 - - if in_channels != 0 and architecture == 'resnet': - self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2, - resample_filter=resample_filter, channels_last=self.channels_last) - - def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs): - _ = update_emas # unused - misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim]) - w_iter = iter(ws.unbind(dim=1)) - if ws.device.type != 'cuda': - force_fp32 = True - dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 - memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format - if fused_modconv is None: - fused_modconv = self.fused_modconv_default - if fused_modconv == 'inference_only': - fused_modconv = (not self.training) - - # Input. - if self.in_channels == 0: - x = self.const.to(dtype=dtype, memory_format=memory_format) - x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1]) - else: - if self.square: - misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2]) - else: # rectangle - misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 4]) - x = x.to(dtype=dtype, memory_format=memory_format) - - # Main layers. - if self.in_channels == 0: - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - elif self.architecture == 'resnet': - y = self.skip(x, gain=np.sqrt(0.5)) - x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs) - x = y.add_(x) - else: - x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - - # ToRGB. 
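A note on the ToRGB step that follows: in the 'skip' generator architecture, every block converts its features to an RGB contribution via a 1x1 `ToRGBLayer` and adds it to a running image, which is upsampled each time the resolution doubles. A toy sketch of that accumulation with a hypothetical `skip_accumulate` helper, using bilinear upsampling in place of the FIR filtering done by `upfirdn2d.upsample2d`:

```
import torch
import torch.nn.functional as F

def skip_accumulate(features, torgb_weights):
    # features: list of [N, C_res, res, res] tensors, coarse to fine.
    # torgb_weights: matching list of 1x1 conv weights [3, C_res, 1, 1].
    img = None
    for x, w in zip(features, torgb_weights):
        if img is not None:
            # Double the running image before adding the next contribution.
            img = F.interpolate(img, scale_factor=2, mode="bilinear",
                                align_corners=False)
        y = F.conv2d(x, w)  # per-resolution ToRGB (1x1 convolution)
        img = y if img is None else img + y
    return img

feats = [torch.randn(1, 16, 4, 4), torch.randn(1, 8, 8, 8)]
ws = [torch.randn(3, 16, 1, 1), torch.randn(3, 8, 1, 1)]
print(skip_accumulate(feats, ws).shape)  # torch.Size([1, 3, 8, 8])
```

Summing a per-resolution RGB output at every block gives each block a direct path to the final image, which is the motivation for the skip architecture.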
- if img is not None: - if self.square: - misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2]) - else: - misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 4]) - img = upfirdn2d.upsample2d(img, self.resample_filter) - if self.is_last or self.architecture == 'skip': - y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv) - y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format) - img = img.add_(y) if img is not None else y - - assert x.dtype == dtype - assert img is None or img.dtype == torch.float32 - return x, img - - def extra_repr(self): - return f'resolution={self.resolution:d}, architecture={self.architecture:s}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisNetwork(torch.nn.Module): - def __init__(self, - w_dim, # Intermediate latent (W) dimensionality. - img_resolution, # Output image resolution. - img_channels, # Number of color channels. - square, - channel_base = 32768, # Overall multiplier for the number of channels. - channel_max = 512, # Maximum number of channels in any layer. - num_fp16_res = 4, # Use FP16 for the N highest resolutions. - **block_kwargs, # Arguments for SynthesisBlock. - ): - assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0 - super().__init__() - self.w_dim = w_dim - self.img_resolution = img_resolution - self.img_resolution_log2 = int(np.log2(img_resolution)) - self.img_channels = img_channels - self.square=square - self.num_fp16_res = num_fp16_res - self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)] - channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions} - fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) - - self.num_ws = 0 - for res in self.block_resolutions: - in_channels = channels_dict[res // 2] if res > 4 else 0 - out_channels = channels_dict[res] - use_fp16 = (res >= fp16_resolution) - is_last = (res == self.img_resolution) - block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res, - img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, square=square,**block_kwargs) - self.num_ws += block.num_conv - if is_last: - self.num_ws += block.num_torgb - setattr(self, f'b{res}', block) - - def forward(self, ws, **block_kwargs): - block_ws = [] - with torch.autograd.profiler.record_function('split_ws'): - misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) - ws = ws.to(torch.float32) - w_idx = 0 - for res in self.block_resolutions: - block = getattr(self, f'b{res}') - block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb)) - w_idx += block.num_conv - - x = img = None - for res, cur_ws in zip(self.block_resolutions, block_ws): - block = getattr(self, f'b{res}') - x, img = block(x, img, cur_ws, **block_kwargs) - return img - - def extra_repr(self): - return ' '.join([ - f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', - f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', - f'num_fp16_res={self.num_fp16_res:d}']) - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class Generator(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality. - c_dim, # Conditioning label (C) dimensionality. - w_dim, # Intermediate latent (W) dimensionality. - square, - img_resolution, # Output resolution. 
- img_channels, # Number of output color channels. - mapping_kwargs = {}, # Arguments for MappingNetwork. - **synthesis_kwargs, # Arguments for SynthesisNetwork. - ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.square = square - self.img_resolution = img_resolution - self.img_channels = img_channels - self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, square=square, **synthesis_kwargs) - self.num_ws = self.synthesis.num_ws - self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): - ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) - img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) - return img - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class DiscriminatorBlock(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels, 0 = first block. - tmp_channels, # Number of intermediate channels. - out_channels, # Number of output channels. - resolution, # Resolution of this block. - img_channels, # Number of input color channels. - first_layer_idx, # Index of the first layer. - architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. - use_fp16 = False, # Use FP16 for this block? - fp16_channels_last = False, # Use channels-last memory format with FP16? - freeze_layers = 0, # Freeze-D: Number of layers to freeze. 
- square = False, - ): - assert in_channels in [0, tmp_channels] - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.resolution = resolution - self.img_channels = img_channels - self.first_layer_idx = first_layer_idx - self.architecture = architecture - self.use_fp16 = use_fp16 - self.channels_last = (use_fp16 and fp16_channels_last) - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.square = square - - self.num_layers = 0 - def trainable_gen(): - while True: - layer_idx = self.first_layer_idx + self.num_layers - trainable = (layer_idx >= freeze_layers) - self.num_layers += 1 - yield trainable - trainable_iter = trainable_gen() - - if in_channels == 0 or architecture == 'skip': - self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation, - trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last) - - self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation, - trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last) - - self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2, - trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last) - - if architecture == 'resnet': - self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2, - trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last) - - def forward(self, x, img, force_fp32=False): - if (x if x is not None else img).device.type != 'cuda': - force_fp32 = True - dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 - memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format - - # Input. - if x is not None: - if self.square: - misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) - else: - misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution // 2]) - x = x.to(dtype=dtype, memory_format=memory_format) - - # FromRGB. - if self.in_channels == 0 or self.architecture == 'skip': - if self.square: - misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) - else: - misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution // 2]) - img = img.to(dtype=dtype, memory_format=memory_format) - y = self.fromrgb(img) - x = x + y if x is not None else y - img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None - - # Main layers. 
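The residual ('resnet') branch below scales both the skip path and the convolution path by sqrt(0.5), so their sum keeps roughly the same variance as the input rather than doubling it. A stripped-down sketch of the pattern with a hypothetical `ResidualDownBlock`, using plain strided convolutions where the code above uses filtered resampling:

```
import numpy as np
import torch
import torch.nn as nn

class ResidualDownBlock(nn.Module):
    # Toy analogue of the 'resnet' discriminator block: two 3x3 convs plus
    # a bias-free 1x1 skip, both downsampling by 2, summed with 1/sqrt(2) gains.
    def __init__(self, cin, cout):
        super().__init__()
        self.conv0 = nn.Conv2d(cin, cin, 3, padding=1)
        self.conv1 = nn.Conv2d(cin, cout, 3, stride=2, padding=1)
        self.skip = nn.Conv2d(cin, cout, 1, stride=2, bias=False)

    def forward(self, x):
        gain = np.sqrt(0.5)  # keeps Var(skip + main) close to Var(x)
        y = self.skip(x) * gain
        x = torch.relu(self.conv0(x))
        x = torch.relu(self.conv1(x)) * gain
        return y + x

block = ResidualDownBlock(8, 16)
print(block(torch.randn(1, 8, 32, 32)).shape)  # torch.Size([1, 16, 16, 16])
```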
- if self.architecture == 'resnet': - y = self.skip(x, gain=np.sqrt(0.5)) - x = self.conv0(x) - x = self.conv1(x, gain=np.sqrt(0.5)) - x = y.add_(x) - else: - x = self.conv0(x) - x = self.conv1(x) - - assert x.dtype == dtype - return x, img - - def extra_repr(self): - return f'resolution={self.resolution:d}, architecture={self.architecture:s}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class MinibatchStdLayer(torch.nn.Module): - def __init__(self, group_size, num_channels=1): - super().__init__() - self.group_size = group_size - self.num_channels = num_channels - - def forward(self, x): - N, C, H, W = x.shape - with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants - G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N)) if self.group_size is not None else N - F = self.num_channels - c = C // F - - y = x.reshape(G, -1, F, c, H, W) # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c. - y = y - y.mean(dim=0) # [GnFcHW] Subtract mean over group. - y = y.square().mean(dim=0) # [nFcHW] Calc variance over group. - y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group. - y = y.mean(dim=[2,3,4]) # [nF] Take average over channels and pixels. - y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions. - y = y.repeat(G, 1, H, W) # [NFHW] Replicate over group and pixels. - x = torch.cat([x, y], dim=1) # [NCHW] Append to input as new channels. - return x - - def extra_repr(self): - return f'group_size={self.group_size}, num_channels={self.num_channels:d}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class DiscriminatorEpilogue(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - cmap_dim, # Dimensionality of mapped conditioning label, 0 = no label. - resolution, # Resolution of this block. - img_channels, # Number of input color channels. - architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. - mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, None = entire minibatch. - mbstd_num_channels = 1, # Number of features for the minibatch standard deviation layer, 0 = disable. - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. - conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. 
- square = False, - ): - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.cmap_dim = cmap_dim - self.resolution = resolution - self.img_channels = img_channels - self.architecture = architecture - self.square = square - - if architecture == 'skip': - self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation) - self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None - self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp) - - if self.square: - self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation) - else: - self.fc = FullyConnectedLayer(in_channels * (resolution ** 2 // 2), in_channels, activation=activation) - - self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim) - - def forward(self, x, img, cmap, force_fp32=False): - if self.square: - misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) - else: - misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution // 2]) # [NCHW] - - _ = force_fp32 # unused - dtype = torch.float32 - memory_format = torch.contiguous_format - - # FromRGB. - x = x.to(dtype=dtype, memory_format=memory_format) - if self.architecture == 'skip': - if self.square: - misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) - else: - misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution // 2]) - - img = img.to(dtype=dtype, memory_format=memory_format) - x = x + self.fromrgb(img) - - # Main layers. - if self.mbstd is not None: - x = self.mbstd(x) - x = self.conv(x) - x = self.fc(x.flatten(1)) - x = self.out(x) - - # Conditioning. - if self.cmap_dim > 0: - misc.assert_shape(cmap, [None, self.cmap_dim]) - x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) - - assert x.dtype == dtype - return x - - def extra_repr(self): - return f'resolution={self.resolution:d}, architecture={self.architecture:s}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class Discriminator(torch.nn.Module): - def __init__(self, - c_dim, # Conditioning label (C) dimensionality. - img_resolution, # Input resolution. - img_channels, # Number of input color channels. - architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. - channel_base = 32768, # Overall multiplier for the number of channels. - channel_max = 512, # Maximum number of channels in any layer. - num_fp16_res = 4, # Use FP16 for the N highest resolutions. - conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping. - cmap_dim = None, # Dimensionality of mapped conditioning label, None = default. - square = False, # default for rectangle images - block_kwargs = {}, # Arguments for DiscriminatorBlock. - mapping_kwargs = {}, # Arguments for MappingNetwork. - epilogue_kwargs = {}, # Arguments for DiscriminatorEpilogue. 
- ): - super().__init__() - self.c_dim = c_dim - self.img_resolution = img_resolution - self.img_resolution_log2 = int(np.log2(img_resolution)) - self.img_channels = img_channels - self.square = square - self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] - channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} - fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) - - if cmap_dim is None: - cmap_dim = channels_dict[4] - if c_dim == 0: - cmap_dim = 0 - - common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) - cur_layer_idx = 0 - for res in self.block_resolutions: - in_channels = channels_dict[res] if res < img_resolution else 0 - tmp_channels = channels_dict[res] - out_channels = channels_dict[res // 2] - use_fp16 = (res >= fp16_resolution) - block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, - first_layer_idx=cur_layer_idx, use_fp16=use_fp16, square=square, **block_kwargs, **common_kwargs) - setattr(self, f'b{res}', block) - cur_layer_idx += block.num_layers - if c_dim > 0: - self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs) - self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, square=square, **epilogue_kwargs, **common_kwargs) - - def forward(self, img, c, update_emas=False, **block_kwargs): - _ = update_emas # unused - x = None - for res in self.block_resolutions: - block = getattr(self, f'b{res}') - x, img = block(x, img, **block_kwargs) - - cmap = None - if self.c_dim > 0: - cmap = self.mapping(None, c) - x = self.b4(x, img, cmap) - return x - - def extra_repr(self): - return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' - -#---------------------------------------------------------------------------- diff --git a/spaces/ECCV2022/bytetrack/tutorials/jde/README.md b/spaces/ECCV2022/bytetrack/tutorials/jde/README.md deleted file mode 100644 index cd915a2225a09b013b2c3ab55b5b2d7e19c66ec0..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tutorials/jde/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# JDE - -Step1. git clone https://github.com/Zhongdao/Towards-Realtime-MOT.git - - -Step2. replace https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/multitracker.py - -Step3. download JDE model trained on MIX and MOT17_half (mix_mot17_half_jde.pt): [google](https://drive.google.com/file/d/1jUiIbaHFf75Jq6thOGI3CPygMMBy6850/view?usp=sharing), [baidu(code:ccdd)](https://pan.baidu.com/s/10se81ZktkUDUWn2dZzkk_Q) - -Step4. put track_half.py under https://github.com/Zhongdao/Towards-Realtime-MOT and run: -``` -python3 track_half.py --cfg ./cfg/yolov3_1088x608.cfg --weights weights/mix_mot17_half_jde.pt -``` - - -## Notes -byte_tracker: only motion - -tracker: motion + reid diff --git a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_new.py b/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_new.py deleted file mode 100644 index 44153b6a23399c6938affc61c71919eaa172bcee..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_new.py +++ /dev/null @@ -1,125 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - - def __call__(self, x): - h = self.conv1(x) - h = self.conv2(h) - - return h - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - - h = self.conv1(x) - # h = self.conv2(h) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) - self.conv3 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - out = self.bottleneck(out) - - if self.dropout is not None: - out = self.dropout(out) - - return out - - -class LSTMModule(nn.Module): - def __init__(self, nin_conv, nin_lstm, nout_lstm): - super(LSTMModule, self).__init__() - self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) - self.lstm = nn.LSTM( - input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True - ) - self.dense = nn.Sequential( - nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU() - ) - - def forward(self, x): - N, _, nbins, nframes = x.size() - h = self.conv(x)[:, 0] # N, nbins, nframes - h = h.permute(2, 0, 1) # nframes, N, nbins - h, _ = self.lstm(h) - h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins - h = h.reshape(nframes, N, 1, nbins) - h = h.permute(1, 2, 3, 0) - - return h diff --git a/spaces/EronSamez/RVC_HFmeu/lib/uvr5_pack/lib_v5/layers_new.py b/spaces/EronSamez/RVC_HFmeu/lib/uvr5_pack/lib_v5/layers_new.py deleted file mode 100644 index 
0c13e60b0dd136d9115a535101c6dbb2a25c6833..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/lib/uvr5_pack/lib_v5/layers_new.py +++ /dev/null @@ -1,125 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - - def __call__(self, x): - h = self.conv1(x) - h = self.conv2(h) - - return h - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - - h = self.conv1(x) - # h = self.conv2(h) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) - self.conv3 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - out = self.bottleneck(out) - - if self.dropout is not None: - out = self.dropout(out) - - return out - - -class LSTMModule(nn.Module): - def __init__(self, nin_conv, nin_lstm, nout_lstm): - super(LSTMModule, self).__init__() - self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) - self.lstm = nn.LSTM( - input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True - ) - self.dense = nn.Sequential( - nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU() - ) - - def forward(self, x): - N, _, nbins, nframes = x.size() - h = self.conv(x)[:, 0] # N, nbins, nframes - h = h.permute(2, 0, 1) # nframes, N, nbins - h, _ = self.lstm(h) - h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins - h = h.reshape(nframes, N, 1, nbins) - h = h.permute(1, 2, 3, 
-
-        return h
diff --git a/spaces/EronSamez/RVC_HFmeu/utils/README.md b/spaces/EronSamez/RVC_HFmeu/utils/README.md
deleted file mode 100644
index fb45a36b5909585aa964f2033762ee59b55526b0..0000000000000000000000000000000000000000
--- a/spaces/EronSamez/RVC_HFmeu/utils/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# External Colab Code
-Code used to make Google Colab work correctly.
-- Repo link: https://github.com/IAHispano/Applio-RVC-Fork/
-
-Thanks to https://github.com/kalomaze/externalcolabcode
-
diff --git a/spaces/EysCanacan/Scikit-LLM-Demo-Eys/app.py b/spaces/EysCanacan/Scikit-LLM-Demo-Eys/app.py
deleted file mode 100644
index 1c01abe6ab762da3c1c6803f258b98578c4a6763..0000000000000000000000000000000000000000
--- a/spaces/EysCanacan/Scikit-LLM-Demo-Eys/app.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import streamlit as st
-import pandas as pd
-from skllm import ZeroShotGPTClassifier, MultiLabelZeroShotGPTClassifier, FewShotGPTClassifier
-from skllm.config import SKLLMConfig
-
-SKLLMConfig.set_openai_key("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")
-SKLLMConfig.set_openai_org("org-LaVV3RIeD17wULvDvKV1G6LZ")
-
-# Initialize the classifiers
-clf_zero_shot = ZeroShotGPTClassifier(openai_model="gpt-3.5-turbo")
-clf_multi_label = MultiLabelZeroShotGPTClassifier(openai_model="gpt-3.5-turbo")
-clf_few_shot = FewShotGPTClassifier(openai_model="gpt-3.5-turbo")
-
-# Classification options
-classification_options = {
-    "About Scikit-LLM": None,
-    "Zero Shot": clf_zero_shot,
-    "Multi-label Zero Shot": clf_multi_label,
-    "Few-Shot": clf_few_shot,
-}
-
-# Dropdown for selecting the classification option
-classification_option = st.selectbox("Select the classification option:", list(classification_options.keys()))
-
-if classification_option == "About Scikit-LLM":
-    st.markdown("### Scikit-LLM")
-    st.markdown("NLP is an important frontier in data science. However, it has its limitations. We’ve built some pretty cool libraries and techniques, but there’s a lot of room for improvement. This is where large language models, or LLMs for short, come in. They give us a chance to push NLP even further.")
-    st.markdown("Imagine scikit-learn, a well-known player in the machine learning field, supercharged with the cutting-edge technology of large language models. That’s exactly where scikit-llm comes in: it takes the power of OpenAI’s API and wraps it up in a familiar sklearn package.")
-    st.markdown("### Learn More About Scikit-LLM")
-    st.markdown("github.com/iryna-kondr/scikit-llm")
-
-
-if classification_option == "Zero Shot":
-    # Display the tutorial text
-    st.markdown("### Zero Shot Classifier Tutorial")
-    st.markdown("The ZeroShotGPTClassifier lets you supply candidate labels of your choice; each text is then classified against those labels without any training examples.")
-    st.markdown("For example, you can include positive and negative categories. If a statement says that the product is terrible, the classifier understands what 'negative' means and labels the statement accordingly.")
-    st.markdown("#### How to Use:")
-    st.markdown("1. Input your candidate labels as a comma-separated list.")
-    st.markdown("2. Input your texts to be classified. Separate them with line breaks.")
-    st.markdown("3. Press the button to classify the texts, and the results will be displayed in a data frame.")
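-
-    # Note: this tab performs prompt-based zero-shot classification. Assuming
-    # skllm's documented behavior, fit() only records the candidate labels, and
-    # predict() asks GPT to pick the best-matching label for each text; no
-    # training takes place.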
-
-    # Get candidate labels from user
-    default_candidate_labels = "Positive, Negative, Neutral"
-    candidate_labels_input = st.text_input("Enter candidate labels as a comma-separated list (e.g., positive, negative, neutral):", default_candidate_labels)
-    candidate_labels = [label.strip() for label in candidate_labels_input.split(',')]  # strip stray spaces around each label
-
-    # Get user input as a block of text
-    default_user_input = "I really love this product!\nI hate this product so much!\nI have nothing to say."
-    user_input = st.text_area("Enter the texts to be classified (one per line)", default_user_input)
-
-    # Split the block of text into individual lines
-    user_inputs = user_input.splitlines()
-
-    # Create a button to run the classification
-    if st.button("Run"):
-        # Fit the classifier with placeholder X; only the candidate labels matter here
-        clf_zero_shot.fit(["dummy"], candidate_labels)
-
-        # Classify the user inputs
-        results = []
-        for user_input in user_inputs:
-            sentiment = clf_zero_shot.predict([user_input])[0]
-            results.append({'Text': user_input, 'Sentiment': sentiment})
-
-        # Create a dataframe from the results
-        results_df = pd.DataFrame(results)
-
-        # Display the results
-        st.dataframe(results_df)
-
-
-if classification_option == "Multi-label Zero Shot":
-    st.markdown("### Multi-Label Zero Shot Classifier Tutorial")
-    st.markdown("The MultiLabelZeroShotGPTClassifier allows you to input multiple candidate labels of your choice, and a single text can receive several of them.")
-    st.markdown("For example, if a review recommends the product because of its quality and price, and those labels are available, it will be classified under both 'quality' and 'price'.")
-    st.markdown("#### How to Use:")
-    st.markdown("1. Input your candidate labels as a comma-separated list.")
-    st.markdown("2. Input your texts to be classified. Separate them with line breaks.")
-    st.markdown("3. Press the button to classify the texts, and the results will be displayed in a data frame.")
-
-    st.markdown("### Input Your Own Text")
-
-    # Get candidate labels from user
-    default_candidate_labels = "Quality, Customer Service, Price"
-    candidate_labels_input = st.text_input("Enter candidate labels as a comma-separated list:", default_candidate_labels)
-    candidate_labels = [label.strip() for label in candidate_labels_input.split(',')]  # strip stray spaces around each label
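-
-    # Note (assuming skllm's documented API): MultiLabelZeroShotGPTClassifier is
-    # fit with X=None and y given as a list of label lists, so [candidate_labels]
-    # below registers every label without any training texts; predict() can
-    # return several labels for a single text.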
-
-    # Get user input as a block of text
-    default_user_input = "The materials of this product are extremely nice. I can't believe it's this cheap!\nI hate how it takes so long before the company responds to my complaints.\n"
-    user_input = st.text_area("Enter the texts to be classified (one per line):", default_user_input)
-
-    # Split the block of text into individual lines
-    user_inputs = user_input.splitlines()
-
-    # Create a button to run the classification
-    if st.button("Run"):
-        # Fit with X=None; y is a list of label lists (here, one list holding every candidate label)
-        clf_multi_label.fit(None, [candidate_labels])
-
-        # Classify the user inputs
-        results = []
-        for user_input in user_inputs:
-            labels = clf_multi_label.predict([user_input])[0]
-            results.append({'Text': user_input, 'Labels': ', '.join(labels)})
-
-        # Create a dataframe from the results
-        results_df = pd.DataFrame(results)
-
-        # Display the results
-        st.dataframe(results_df)
-
-
-elif classification_option == "Few-Shot":
-    st.markdown("### Few Shot Classifier Tutorial")
-    st.markdown("The FewShotGPTClassifier lets you provide candidate labels together with sample texts that give the model additional context.")
-    st.markdown("When you provide example texts labeled 'satisfied' or 'dissatisfied', the classifier learns the connection between the texts and their labels. Once fitted, it can classify new texts as 'satisfied' or 'dissatisfied' according to that learned context.")
-    st.markdown("#### How to Use:")
-    st.markdown("1. Input the label name for each class")
-    st.markdown("2. Provide the sample texts for each label (one per line)")
-    st.markdown("3. Input the texts to be classified")
-    st.markdown("4. Press the button to classify the texts")
-    st.markdown("#### Input your own text:")
-
-    # Default values
-    default_labels = ["Satisfied", "Dissatisfied"]
-    default_texts = [
-        ["I love this product so much. I will recommend it to my family",
-         "Spending this much is so worth it because of the build quality",
-         "I don't regret buying this product!"],
-        ["It doesn't work well, this product is so bad.",
-         "The quality is not good at all, it feels so flimsy.",
-         "I wasted my money on this!"]
-    ]
-
-    default_input_to_classify = [
-        "Hoping that the company will make more of this product!",
-        "I salute the designers of this product.",
-        "What's wrong with this company making the product?",
-        "I regret buying this product. I hate it!"
-    ]
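-
-    # Note: "few-shot" here happens entirely in the prompt. Assuming skllm's
-    # documented behavior, fit() stores these example texts with their labels,
-    # and predict() inserts them into the prompt sent to GPT for each new text;
-    # no model weights are updated.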
-
-    num_classes = len(default_labels)
-
-    # Get user input for texts and their corresponding labels
-    train_data = []
-    for i in range(num_classes):
-        class_label = st.text_input(f"Enter the label for class {i+1}:", value=default_labels[i])
-        texts_input = st.text_area(f"Enter the texts for class {i+1} (one per line):", value="\n".join(default_texts[i]))
-
-        if class_label and texts_input:  # Check if class label and texts have been entered
-            texts = texts_input.splitlines()
-            train_data.extend([(text, class_label) for text in texts])
-
-    # Get user input for texts to be classified; the widget is created outside the
-    # button branch so its value is available on the rerun triggered by clicking Classify
-    user_input = st.text_area("Enter the texts to classify (one per line):", value="\n".join(default_input_to_classify))
-
-    # Create a button to run the classification
-    if st.button("Classify"):
-        if train_data and user_input:  # Check that training data and texts have been entered
-            # Convert data to a dataframe
-            train_df = pd.DataFrame(train_data, columns=['Text', 'Label'])
-
-            # Fit the classifier on the training data
-            clf_few_shot.fit(train_df['Text'], train_df['Label'])
-
-            # Split the block of text into individual lines
-            user_inputs = user_input.splitlines()
-
-            # Predict the labels for the test data
-            predicted_labels = clf_few_shot.predict(user_inputs)
-
-            # Create a dataframe to display the results
-            results_df = pd.DataFrame({
-                'Review': user_inputs,
-                'Predicted Label': predicted_labels
-            })
-
-            # Display the dataframe
-            st.dataframe(results_df)
\ No newline at end of file
diff --git a/spaces/Fengbinbin/gpt-academic/crazy_functions/test_project/cpp/cppipc/shm.cpp b/spaces/Fengbinbin/gpt-academic/crazy_functions/test_project/cpp/cppipc/shm.cpp
deleted file mode 100644
index 593ce3129dc1574dbc8fc8b088cf595df215de93..0000000000000000000000000000000000000000
--- a/spaces/Fengbinbin/gpt-academic/crazy_functions/test_project/cpp/cppipc/shm.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-
-#include <string>
-#include <utility>
-
-#include "libipc/shm.h"
-
-#include "libipc/utility/pimpl.h"
-#include "libipc/memory/resource.h"
-
-namespace ipc {
-namespace shm {
-
-class handle::handle_ : public pimpl<handle_> {
-public:
-    shm::id_t id_ = nullptr;  // shared-memory segment id
-    void*     m_  = nullptr;  // mapped memory address
-
-    ipc::string n_;      // segment name
-    std::size_t s_ = 0;  // segment size in bytes
-};
-
-handle::handle()
-    : p_(p_->make()) {
-}
-
-handle::handle(char const * name, std::size_t size, unsigned mode)
-    : handle() {
-    acquire(name, size, mode);
-}
-
-handle::handle(handle&& rhs)
-    : handle() {
-    swap(rhs);
-}
-
-handle::~handle() {
-    release();
-    p_->clear();
-}
-
-void handle::swap(handle& rhs) {
-    std::swap(p_, rhs.p_);
-}
-
-handle& handle::operator=(handle rhs) {
-    swap(rhs);
-    return *this;
-}
-
-bool handle::valid() const noexcept {
-    return impl(p_)->m_ != nullptr;
-}
-
-std::size_t handle::size() const noexcept {
-    return impl(p_)->s_;
-}
-
-char const * handle::name() const noexcept {
-    return impl(p_)->n_.c_str();
-}
-
-std::int32_t handle::ref() const noexcept {
-    return shm::get_ref(impl(p_)->id_);
-}
-
-void handle::sub_ref() noexcept {
-    shm::sub_ref(impl(p_)->id_);
-}
-
-bool handle::acquire(char const * name, std::size_t size, unsigned mode) {
-    release();
-    impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), size, mode);
-    impl(p_)->m_  = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
-    return valid();
-}
-
-std::int32_t handle::release() {
-    if (impl(p_)->id_ == nullptr) return -1;
-    return shm::release(detach());
-}
-
-void* handle::get() const {
-    return impl(p_)->m_;
-}
-
-void handle::attach(id_t id) {
-    if (id == nullptr) return;
-    release();
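-    // attach() adopts an existing segment: with the old mapping released above,
-    // it stores the given id and maps its memory (get_mem also reports the
-    // segment size into s_)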
- impl(p_)->id_ = id; - impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_)); -} - -id_t handle::detach() { - auto old = impl(p_)->id_; - impl(p_)->id_ = nullptr; - impl(p_)->m_ = nullptr; - impl(p_)->s_ = 0; - impl(p_)->n_.clear(); - return old; -} - -} // namespace shm -} // namespace ipc diff --git a/spaces/Fengbinbin/gpt-academic/docs/waifu_plugin/live2d.js b/spaces/Fengbinbin/gpt-academic/docs/waifu_plugin/live2d.js deleted file mode 100644 index 2cf559be672c438dfbd35db61eea12465ed0dffb..0000000000000000000000000000000000000000 --- a/spaces/Fengbinbin/gpt-academic/docs/waifu_plugin/live2d.js +++ /dev/null @@ -1,4238 +0,0 @@ -! -function(t) { - function i(r) { - if (e[r]) return e[r].exports; - var o = e[r] = { - i: r, - l: !1, - exports: {} - }; - return t[r].call(o.exports, o, o.exports, i), o.l = !0, o.exports - } - var e = {}; - i.m = t, i.c = e, i.d = function(t, e, r) { - i.o(t, e) || Object.defineProperty(t, e, { - configurable: !1, - enumerable: !0, - get: r - }) - }, i.n = function(t) { - var e = t && t.__esModule ? - function() { - return t. - default - } : function() { - return t - }; - return i.d(e, "a", e), e - }, i.o = function(t, i) { - return Object.prototype.hasOwnProperty.call(t, i) - }, i.p = "", i(i.s = 4) -}([function(t, i, e) { - "use strict"; - - function r() { - this.live2DModel = null, this.modelMatrix = null, this.eyeBlink = null, this.physics = null, this.pose = null, this.debugMode = !1, this.initialized = !1, this.updating = !1, this.alpha = 1, this.accAlpha = 0, this.lipSync = !1, this.lipSyncValue = 0, this.accelX = 0, this.accelY = 0, this.accelZ = 0, this.dragX = 0, this.dragY = 0, this.startTimeMSec = null, this.mainMotionManager = new h, this.expressionManager = new h, this.motions = {}, this.expressions = {}, this.isTexLoaded = !1 - } - function o() { - AMotion.prototype.constructor.call(this), this.paramList = new Array - } - function n() { - this.id = "", this.type = -1, this.value = null - } - function s() { - this.nextBlinkTime = null, this.stateStartTime = null, this.blinkIntervalMsec = null, this.eyeState = g.STATE_FIRST, this.blinkIntervalMsec = 4e3, this.closingMotionMsec = 100, this.closedMotionMsec = 50, this.openingMotionMsec = 150, this.closeIfZero = !0, this.eyeID_L = "PARAM_EYE_L_OPEN", this.eyeID_R = "PARAM_EYE_R_OPEN" - } - function _() { - this.tr = new Float32Array(16), this.identity() - } - function a(t, i) { - _.prototype.constructor.call(this), this.width = t, this.height = i - } - function h() { - MotionQueueManager.prototype.constructor.call(this), this.currentPriority = null, this.reservePriority = null, this.super = MotionQueueManager.prototype - } - function l() { - this.physicsList = new Array, this.startTimeMSec = UtSystem.getUserTimeMSec() - } - function $() { - this.lastTime = 0, this.lastModel = null, this.partsGroups = new Array - } - function u(t) { - this.paramIndex = -1, this.partsIndex = -1, this.link = null, this.id = t - } - function p() { - this.EPSILON = .01, this.faceTargetX = 0, this.faceTargetY = 0, this.faceX = 0, this.faceY = 0, this.faceVX = 0, this.faceVY = 0, this.lastTimeSec = 0 - } - function f() { - _.prototype.constructor.call(this), this.screenLeft = null, this.screenRight = null, this.screenTop = null, this.screenBottom = null, this.maxLeft = null, this.maxRight = null, this.maxTop = null, this.maxBottom = null, this.max = Number.MAX_VALUE, this.min = 0 - } - function c() {} - var d = 0; - r.prototype.getModelMatrix = function() { - return this.modelMatrix - }, r.prototype.setAlpha = 
function(t) { - t > .999 && (t = 1), t < .001 && (t = 0), this.alpha = t - }, r.prototype.getAlpha = function() { - return this.alpha - }, r.prototype.isInitialized = function() { - return this.initialized - }, r.prototype.setInitialized = function(t) { - this.initialized = t - }, r.prototype.isUpdating = function() { - return this.updating - }, r.prototype.setUpdating = function(t) { - this.updating = t - }, r.prototype.getLive2DModel = function() { - return this.live2DModel - }, r.prototype.setLipSync = function(t) { - this.lipSync = t - }, r.prototype.setLipSyncValue = function(t) { - this.lipSyncValue = t - }, r.prototype.setAccel = function(t, i, e) { - this.accelX = t, this.accelY = i, this.accelZ = e - }, r.prototype.setDrag = function(t, i) { - this.dragX = t, this.dragY = i - }, r.prototype.getMainMotionManager = function() { - return this.mainMotionManager - }, r.prototype.getExpressionManager = function() { - return this.expressionManager - }, r.prototype.loadModelData = function(t, i) { - var e = c.getPlatformManager(); - this.debugMode && e.log("Load model : " + t); - var r = this; - e.loadLive2DModel(t, function(t) { - if (r.live2DModel = t, r.live2DModel.saveParam(), 0 != Live2D.getError()) return void console.error("Error : Failed to loadModelData()."); - r.modelMatrix = new a(r.live2DModel.getCanvasWidth(), r.live2DModel.getCanvasHeight()), r.modelMatrix.setWidth(2), r.modelMatrix.setCenterPosition(0, 0), i(r.live2DModel) - }) - }, r.prototype.loadTexture = function(t, i, e) { - d++; - var r = c.getPlatformManager(); - this.debugMode && r.log("Load Texture : " + i); - var o = this; - r.loadTexture(this.live2DModel, t, i, function() { - d--, 0 == d && (o.isTexLoaded = !0), "function" == typeof e && e() - }) - }, r.prototype.loadMotion = function(t, i, e) { - var r = c.getPlatformManager(); - this.debugMode && r.log("Load Motion : " + i); - var o = null, - n = this; - r.loadBytes(i, function(i) { - o = Live2DMotion.loadMotion(i), null != t && (n.motions[t] = o), e(o) - }) - }, r.prototype.loadExpression = function(t, i, e) { - var r = c.getPlatformManager(); - this.debugMode && r.log("Load Expression : " + i); - var n = this; - r.loadBytes(i, function(i) { - null != t && (n.expressions[t] = o.loadJson(i)), "function" == typeof e && e() - }) - }, r.prototype.loadPose = function(t, i) { - var e = c.getPlatformManager(); - this.debugMode && e.log("Load Pose : " + t); - var r = this; - try { - e.loadBytes(t, function(t) { - r.pose = $.load(t), "function" == typeof i && i() - }) - } catch (t) { - console.warn(t) - } - }, r.prototype.loadPhysics = function(t) { - var i = c.getPlatformManager(); - this.debugMode && i.log("Load Physics : " + t); - var e = this; - try { - i.loadBytes(t, function(t) { - e.physics = l.load(t) - }) - } catch (t) { - console.warn(t) - } - }, r.prototype.hitTestSimple = function(t, i, e) { - if (null === this.live2DModel) return !1; - var r = this.live2DModel.getDrawDataIndex(t); - if (r < 0) return !1; - for (var o = this.live2DModel.getTransformedPoints(r), n = this.live2DModel.getCanvasWidth(), s = 0, _ = this.live2DModel.getCanvasHeight(), a = 0, h = 0; h < o.length; h += 2) { - var l = o[h], - $ = o[h + 1]; - l < n && (n = l), l > s && (s = l), $ < _ && (_ = $), $ > a && (a = $) - } - var u = this.modelMatrix.invertTransformX(i), - p = this.modelMatrix.invertTransformY(e); - return n <= u && u <= s && _ <= p && p <= a - }, r.prototype.hitTestSimpleCustom = function(t, i, e, r) { - return null !== this.live2DModel && (e >= t[0] && e <= i[0] && r <= 
t[1] && r >= i[1]) - }, o.prototype = new AMotion, o.EXPRESSION_DEFAULT = "DEFAULT", o.TYPE_SET = 0, o.TYPE_ADD = 1, o.TYPE_MULT = 2, o.loadJson = function(t) { - var i = new o, - e = c.getPlatformManager(), - r = e.jsonParseFromBytes(t); - if (i.setFadeIn(parseInt(r.fade_in) > 0 ? parseInt(r.fade_in) : 1e3), i.setFadeOut(parseInt(r.fade_out) > 0 ? parseInt(r.fade_out) : 1e3), null == r.params) return i; - var s = r.params, - _ = s.length; - i.paramList = []; - for (var a = 0; a < _; a++) { - var h = s[a], - l = h.id.toString(), - $ = parseFloat(h.val), - u = o.TYPE_ADD, - p = null != h.calc ? h.calc.toString() : "add"; - if ((u = "add" === p ? o.TYPE_ADD : "mult" === p ? o.TYPE_MULT : "set" === p ? o.TYPE_SET : o.TYPE_ADD) == o.TYPE_ADD) { - var f = null == h.def ? 0 : parseFloat(h.def); - $ -= f - } else if (u == o.TYPE_MULT) { - var f = null == h.def ? 1 : parseFloat(h.def); - 0 == f && (f = 1), $ /= f - } - var d = new n; - d.id = l, d.type = u, d.value = $, i.paramList.push(d) - } - return i - }, o.prototype.updateParamExe = function(t, i, e, r) { - for (var n = this.paramList.length - 1; n >= 0; --n) { - var s = this.paramList[n]; - s.type == o.TYPE_ADD ? t.addToParamFloat(s.id, s.value, e) : s.type == o.TYPE_MULT ? t.multParamFloat(s.id, s.value, e) : s.type == o.TYPE_SET && t.setParamFloat(s.id, s.value, e) - } - }, s.prototype.calcNextBlink = function() { - return UtSystem.getUserTimeMSec() + Math.random() * (2 * this.blinkIntervalMsec - 1) - }, s.prototype.setInterval = function(t) { - this.blinkIntervalMsec = t - }, s.prototype.setEyeMotion = function(t, i, e) { - this.closingMotionMsec = t, this.closedMotionMsec = i, this.openingMotionMsec = e - }, s.prototype.updateParam = function(t) { - var i, e = UtSystem.getUserTimeMSec(), - r = 0; - switch (this.eyeState) { - case g.STATE_CLOSING: - r = (e - this.stateStartTime) / this.closingMotionMsec, r >= 1 && (r = 1, this.eyeState = g.STATE_CLOSED, this.stateStartTime = e), i = 1 - r; - break; - case g.STATE_CLOSED: - r = (e - this.stateStartTime) / this.closedMotionMsec, r >= 1 && (this.eyeState = g.STATE_OPENING, this.stateStartTime = e), i = 0; - break; - case g.STATE_OPENING: - r = (e - this.stateStartTime) / this.openingMotionMsec, r >= 1 && (r = 1, this.eyeState = g.STATE_INTERVAL, this.nextBlinkTime = this.calcNextBlink()), i = r; - break; - case g.STATE_INTERVAL: - this.nextBlinkTime < e && (this.eyeState = g.STATE_CLOSING, this.stateStartTime = e), i = 1; - break; - case g.STATE_FIRST: - default: - this.eyeState = g.STATE_INTERVAL, this.nextBlinkTime = this.calcNextBlink(), i = 1 - } - this.closeIfZero || (i = -i), t.setParamFloat(this.eyeID_L, i), t.setParamFloat(this.eyeID_R, i) - }; - var g = function() {}; - g.STATE_FIRST = "STATE_FIRST", g.STATE_INTERVAL = "STATE_INTERVAL", g.STATE_CLOSING = "STATE_CLOSING", g.STATE_CLOSED = "STATE_CLOSED", g.STATE_OPENING = "STATE_OPENING", _.mul = function(t, i, e) { - var r, o, n, s = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - for (r = 0; r < 4; r++) for (o = 0; o < 4; o++) for (n = 0; n < 4; n++) s[r + 4 * o] += t[r + 4 * n] * i[n + 4 * o]; - for (r = 0; r < 16; r++) e[r] = s[r] - }, _.prototype.identity = function() { - for (var t = 0; t < 16; t++) this.tr[t] = t % 5 == 0 ? 
1 : 0 - }, _.prototype.getArray = function() { - return this.tr - }, _.prototype.getCopyMatrix = function() { - return new Float32Array(this.tr) - }, _.prototype.setMatrix = function(t) { - if (null != this.tr && this.tr.length == this.tr.length) for (var i = 0; i < 16; i++) this.tr[i] = t[i] - }, _.prototype.getScaleX = function() { - return this.tr[0] - }, _.prototype.getScaleY = function() { - return this.tr[5] - }, _.prototype.transformX = function(t) { - return this.tr[0] * t + this.tr[12] - }, _.prototype.transformY = function(t) { - return this.tr[5] * t + this.tr[13] - }, _.prototype.invertTransformX = function(t) { - return (t - this.tr[12]) / this.tr[0] - }, _.prototype.invertTransformY = function(t) { - return (t - this.tr[13]) / this.tr[5] - }, _.prototype.multTranslate = function(t, i) { - var e = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t, i, 0, 1]; - _.mul(e, this.tr, this.tr) - }, _.prototype.translate = function(t, i) { - this.tr[12] = t, this.tr[13] = i - }, _.prototype.translateX = function(t) { - this.tr[12] = t - }, _.prototype.translateY = function(t) { - this.tr[13] = t - }, _.prototype.multScale = function(t, i) { - var e = [t, 0, 0, 0, 0, i, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]; - _.mul(e, this.tr, this.tr) - }, _.prototype.scale = function(t, i) { - this.tr[0] = t, this.tr[5] = i - }, a.prototype = new _, a.prototype.setPosition = function(t, i) { - this.translate(t, i) - }, a.prototype.setCenterPosition = function(t, i) { - var e = this.width * this.getScaleX(), - r = this.height * this.getScaleY(); - this.translate(t - e / 2, i - r / 2) - }, a.prototype.top = function(t) { - this.setY(t) - }, a.prototype.bottom = function(t) { - var i = this.height * this.getScaleY(); - this.translateY(t - i) - }, a.prototype.left = function(t) { - this.setX(t) - }, a.prototype.right = function(t) { - var i = this.width * this.getScaleX(); - this.translateX(t - i) - }, a.prototype.centerX = function(t) { - var i = this.width * this.getScaleX(); - this.translateX(t - i / 2) - }, a.prototype.centerY = function(t) { - var i = this.height * this.getScaleY(); - this.translateY(t - i / 2) - }, a.prototype.setX = function(t) { - this.translateX(t) - }, a.prototype.setY = function(t) { - this.translateY(t) - }, a.prototype.setHeight = function(t) { - var i = t / this.height, - e = -i; - this.scale(i, e) - }, a.prototype.setWidth = function(t) { - var i = t / this.width, - e = -i; - this.scale(i, e) - }, h.prototype = new MotionQueueManager, h.prototype.getCurrentPriority = function() { - return this.currentPriority - }, h.prototype.getReservePriority = function() { - return this.reservePriority - }, h.prototype.reserveMotion = function(t) { - return !(this.reservePriority >= t) && (!(this.currentPriority >= t) && (this.reservePriority = t, !0)) - }, h.prototype.setReservePriority = function(t) { - this.reservePriority = t - }, h.prototype.updateParam = function(t) { - var i = MotionQueueManager.prototype.updateParam.call(this, t); - return this.isFinished() && (this.currentPriority = 0), i - }, h.prototype.startMotionPrio = function(t, i) { - return i == this.reservePriority && (this.reservePriority = 0), this.currentPriority = i, this.startMotion(t, !1) - }, l.load = function(t) { - for (var i = new l, e = c.getPlatformManager(), r = e.jsonParseFromBytes(t), o = r.physics_hair, n = o.length, s = 0; s < n; s++) { - var _ = o[s], - a = new PhysicsHair, - h = _.setup, - $ = parseFloat(h.length), - u = parseFloat(h.regist), - p = parseFloat(h.mass); - a.setup($, u, p); - for (var f = _.src, d = 
f.length, g = 0; g < d; g++) { - var y = f[g], - m = y.id, - T = PhysicsHair.Src.SRC_TO_X, - P = y.ptype; - "x" === P ? T = PhysicsHair.Src.SRC_TO_X : "y" === P ? T = PhysicsHair.Src.SRC_TO_Y : "angle" === P ? T = PhysicsHair.Src.SRC_TO_G_ANGLE : UtDebug.error("live2d", "Invalid parameter:PhysicsHair.Src"); - var S = parseFloat(y.scale), - v = parseFloat(y.weight); - a.addSrcParam(T, m, S, v) - } - for (var L = _.targets, M = L.length, g = 0; g < M; g++) { - var E = L[g], - m = E.id, - T = PhysicsHair.Target.TARGET_FROM_ANGLE, - P = E.ptype; - "angle" === P ? T = PhysicsHair.Target.TARGET_FROM_ANGLE : "angle_v" === P ? T = PhysicsHair.Target.TARGET_FROM_ANGLE_V : UtDebug.error("live2d", "Invalid parameter:PhysicsHair.Target"); - var S = parseFloat(E.scale), - v = parseFloat(E.weight); - a.addTargetParam(T, m, S, v) - } - i.physicsList.push(a) - } - return i - }, l.prototype.updateParam = function(t) { - for (var i = UtSystem.getUserTimeMSec() - this.startTimeMSec, e = 0; e < this.physicsList.length; e++) this.physicsList[e].update(t, i) - }, $.load = function(t) { - for (var i = new $, e = c.getPlatformManager(), r = e.jsonParseFromBytes(t), o = r.parts_visible, n = o.length, s = 0; s < n; s++) { - for (var _ = o[s], a = _.group, h = a.length, l = new Array, p = 0; p < h; p++) { - var f = a[p], - d = new u(f.id); - if (l[p] = d, null != f.link) { - var g = f.link, - y = g.length; - d.link = new Array; - for (var m = 0; m < y; m++) { - var T = new u(g[m]); - d.link.push(T) - } - } - } - i.partsGroups.push(l) - } - return i - }, $.prototype.updateParam = function(t) { - if (null != t) { - t != this.lastModel && this.initParam(t), this.lastModel = t; - var i = UtSystem.getUserTimeMSec(), - e = 0 == this.lastTime ? 0 : (i - this.lastTime) / 1e3; - this.lastTime = i, e < 0 && (e = 0); - for (var r = 0; r < this.partsGroups.length; r++) this.normalizePartsOpacityGroup(t, this.partsGroups[r], e), this.copyOpacityOtherParts(t, this.partsGroups[r]) - } - }, $.prototype.initParam = function(t) { - if (null != t) for (var i = 0; i < this.partsGroups.length; i++) for (var e = this.partsGroups[i], r = 0; r < e.length; r++) { - e[r].initIndex(t); - var o = e[r].partsIndex, - n = e[r].paramIndex; - if (!(o < 0)) { - var s = 0 != t.getParamFloat(n); - if (t.setPartsOpacity(o, s ? 1 : 0), t.setParamFloat(n, s ? 1 : 0), null != e[r].link) for (var _ = 0; _ < e[r].link.length; _++) e[r].link[_].initIndex(t) - } - } - }, $.prototype.normalizePartsOpacityGroup = function(t, i, e) { - for (var r = -1, o = 1, n = 0; n < i.length; n++) { - var s = i[n].partsIndex, - _ = i[n].paramIndex; - if (!(s < 0) && 0 != t.getParamFloat(_)) { - if (r >= 0) break; - r = n, o = t.getPartsOpacity(s), o += e / .5, o > 1 && (o = 1) - } - } - r < 0 && (r = 0, o = 1); - for (var n = 0; n < i.length; n++) { - var s = i[n].partsIndex; - if (!(s < 0)) if (r == n) t.setPartsOpacity(s, o); - else { - var a, h = t.getPartsOpacity(s); - a = o < .5 ? 
-.5 * o / .5 + 1 : .5 * (1 - o) / .5; - var l = (1 - a) * (1 - o); - l > .15 && (a = 1 - .15 / (1 - o)), h > a && (h = a), t.setPartsOpacity(s, h) - } - } - }, $.prototype.copyOpacityOtherParts = function(t, i) { - for (var e = 0; e < i.length; e++) { - var r = i[e]; - if (null != r.link && !(r.partsIndex < 0)) for (var o = t.getPartsOpacity(r.partsIndex), n = 0; n < r.link.length; n++) { - var s = r.link[n]; - s.partsIndex < 0 || t.setPartsOpacity(s.partsIndex, o) - } - } - }, u.prototype.initIndex = function(t) { - this.paramIndex = t.getParamIndex("VISIBLE:" + this.id), this.partsIndex = t.getPartsDataIndex(PartsDataID.getID(this.id)), t.setParamFloat(this.paramIndex, 1) - }, p.FRAME_RATE = 30, p.prototype.setPoint = function(t, i) { - this.faceTargetX = t, this.faceTargetY = i - }, p.prototype.getX = function() { - return this.faceX - }, p.prototype.getY = function() { - return this.faceY - }, p.prototype.update = function() { - var t = 40 / 7.5 / p.FRAME_RATE; - if (0 == this.lastTimeSec) return void(this.lastTimeSec = UtSystem.getUserTimeMSec()); - var i = UtSystem.getUserTimeMSec(), - e = (i - this.lastTimeSec) * p.FRAME_RATE / 1e3; - this.lastTimeSec = i; - var r = .15 * p.FRAME_RATE, - o = e * t / r, - n = this.faceTargetX - this.faceX, - s = this.faceTargetY - this.faceY; - if (!(Math.abs(n) <= this.EPSILON && Math.abs(s) <= this.EPSILON)) { - var _ = Math.sqrt(n * n + s * s), - a = t * n / _, - h = t * s / _, - l = a - this.faceVX, - $ = h - this.faceVY, - u = Math.sqrt(l * l + $ * $); - (u < -o || u > o) && (l *= o / u, $ *= o / u, u = o), this.faceVX += l, this.faceVY += $; - var f = .5 * (Math.sqrt(o * o + 16 * o * _ - 8 * o * _) - o), - c = Math.sqrt(this.faceVX * this.faceVX + this.faceVY * this.faceVY); - c > f && (this.faceVX *= f / c, this.faceVY *= f / c), this.faceX += this.faceVX, this.faceY += this.faceVY - } - }, f.prototype = new _, f.prototype.getMaxScale = function() { - return this.max - }, f.prototype.getMinScale = function() { - return this.min - }, f.prototype.setMaxScale = function(t) { - this.max = t - }, f.prototype.setMinScale = function(t) { - this.min = t - }, f.prototype.isMaxScale = function() { - return this.getScaleX() == this.max - }, f.prototype.isMinScale = function() { - return this.getScaleX() == this.min - }, f.prototype.adjustTranslate = function(t, i) { - this.tr[0] * this.maxLeft + (this.tr[12] + t) > this.screenLeft && (t = this.screenLeft - this.tr[0] * this.maxLeft - this.tr[12]), this.tr[0] * this.maxRight + (this.tr[12] + t) < this.screenRight && (t = this.screenRight - this.tr[0] * this.maxRight - this.tr[12]), this.tr[5] * this.maxTop + (this.tr[13] + i) < this.screenTop && (i = this.screenTop - this.tr[5] * this.maxTop - this.tr[13]), this.tr[5] * this.maxBottom + (this.tr[13] + i) > this.screenBottom && (i = this.screenBottom - this.tr[5] * this.maxBottom - this.tr[13]); - var e = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t, i, 0, 1]; - _.mul(e, this.tr, this.tr) - }, f.prototype.adjustScale = function(t, i, e) { - var r = e * this.tr[0]; - r < this.min ? 
this.tr[0] > 0 && (e = this.min / this.tr[0]) : r > this.max && this.tr[0] > 0 && (e = this.max / this.tr[0]); - var o = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t, i, 0, 1], - n = [e, 0, 0, 0, 0, e, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], - s = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, -t, -i, 0, 1]; - _.mul(s, this.tr, this.tr), _.mul(n, this.tr, this.tr), _.mul(o, this.tr, this.tr) - }, f.prototype.setScreenRect = function(t, i, e, r) { - this.screenLeft = t, this.screenRight = i, this.screenTop = r, this.screenBottom = e - }, f.prototype.setMaxScreenRect = function(t, i, e, r) { - this.maxLeft = t, this.maxRight = i, this.maxTop = r, this.maxBottom = e - }, f.prototype.getScreenLeft = function() { - return this.screenLeft - }, f.prototype.getScreenRight = function() { - return this.screenRight - }, f.prototype.getScreenBottom = function() { - return this.screenBottom - }, f.prototype.getScreenTop = function() { - return this.screenTop - }, f.prototype.getMaxLeft = function() { - return this.maxLeft - }, f.prototype.getMaxRight = function() { - return this.maxRight - }, f.prototype.getMaxBottom = function() { - return this.maxBottom - }, f.prototype.getMaxTop = function() { - return this.maxTop - }, c.platformManager = null, c.getPlatformManager = function() { - return c.platformManager - }, c.setPlatformManager = function(t) { - c.platformManager = t - }, t.exports = { - L2DTargetPoint: p, - Live2DFramework: c, - L2DViewMatrix: f, - L2DPose: $, - L2DPartsParam: u, - L2DPhysics: l, - L2DMotionManager: h, - L2DModelMatrix: a, - L2DMatrix44: _, - EYE_STATE: g, - L2DEyeBlink: s, - L2DExpressionParam: n, - L2DExpressionMotion: o, - L2DBaseModel: r - } -}, function(t, i, e) { - "use strict"; - var r = { - DEBUG_LOG: !1, - DEBUG_MOUSE_LOG: !1, - DEBUG_DRAW_HIT_AREA: !1, - DEBUG_DRAW_ALPHA_MODEL: !1, - VIEW_MAX_SCALE: 2, - VIEW_MIN_SCALE: .8, - VIEW_LOGICAL_LEFT: -1, - VIEW_LOGICAL_RIGHT: 1, - VIEW_LOGICAL_MAX_LEFT: -2, - VIEW_LOGICAL_MAX_RIGHT: 2, - VIEW_LOGICAL_MAX_BOTTOM: -2, - VIEW_LOGICAL_MAX_TOP: 2, - PRIORITY_NONE: 0, - PRIORITY_IDLE: 1, - PRIORITY_SLEEPY: 2, - PRIORITY_NORMAL: 3, - PRIORITY_FORCE: 4, - MOTION_GROUP_IDLE: "idle", - MOTION_GROUP_SLEEPY: "sleepy", - MOTION_GROUP_TAP_BODY: "tap_body", - MOTION_GROUP_FLICK_HEAD: "flick_head", - MOTION_GROUP_PINCH_IN: "pinch_in", - MOTION_GROUP_PINCH_OUT: "pinch_out", - MOTION_GROUP_SHAKE: "shake", - HIT_AREA_HEAD: "head", - HIT_AREA_BODY: "body" - }; - t.exports = r -}, function(t, i, e) { - "use strict"; - - function r(t) { - n = t - } - function o() { - return n - } - Object.defineProperty(i, "__esModule", { - value: !0 - }), i.setContext = r, i.getContext = o; - var n = void 0 -}, function(t, i, e) { - "use strict"; - - function r() {} - r.matrixStack = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], r.depth = 0, r.currentMatrix = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], r.tmp = new Array(16), r.reset = function() { - this.depth = 0 - }, r.loadIdentity = function() { - for (var t = 0; t < 16; t++) this.currentMatrix[t] = t % 5 == 0 ? 
1 : 0 - }, r.push = function() { - var t = (this.depth, 16 * (this.depth + 1)); - this.matrixStack.length < t + 16 && (this.matrixStack.length = t + 16); - for (var i = 0; i < 16; i++) this.matrixStack[t + i] = this.currentMatrix[i]; - this.depth++ - }, r.pop = function() { - --this.depth < 0 && (myError("Invalid matrix stack."), this.depth = 0); - for (var t = 16 * this.depth, i = 0; i < 16; i++) this.currentMatrix[i] = this.matrixStack[t + i] - }, r.getMatrix = function() { - return this.currentMatrix - }, r.multMatrix = function(t) { - var i, e, r; - for (i = 0; i < 16; i++) this.tmp[i] = 0; - for (i = 0; i < 4; i++) for (e = 0; e < 4; e++) for (r = 0; r < 4; r++) this.tmp[i + 4 * e] += this.currentMatrix[i + 4 * r] * t[r + 4 * e]; - for (i = 0; i < 16; i++) this.currentMatrix[i] = this.tmp[i] - }, t.exports = r -}, function(t, i, e) { - t.exports = e(5) -}, function(t, i, e) { - "use strict"; - - function r(t) { - return t && t.__esModule ? t : { - default: - t - } - } - function o(t) { - C = document.getElementById(t), C.addEventListener && (window.addEventListener("click", g), window.addEventListener("mousedown", g), window.addEventListener("mousemove", g), window.addEventListener("mouseup", g), document.addEventListener("mouseout", g), window.addEventListener("touchstart", y), window.addEventListener("touchend", y), window.addEventListener("touchmove", y)) - } - function n(t) { - var i = C.width, - e = C.height; - N = new M.L2DTargetPoint; - var r = e / i, - o = w. - default.VIEW_LOGICAL_LEFT, - n = w. - default.VIEW_LOGICAL_RIGHT, - _ = -r, - h = r; - if (window.Live2D.captureFrame = !1, B = new M.L2DViewMatrix, B.setScreenRect(o, n, _, h), B.setMaxScreenRect(w. - default.VIEW_LOGICAL_MAX_LEFT, w. - default.VIEW_LOGICAL_MAX_RIGHT, w. - default.VIEW_LOGICAL_MAX_BOTTOM, w. - default.VIEW_LOGICAL_MAX_TOP), B.setMaxScale(w. - default.VIEW_MAX_SCALE), B.setMinScale(w. - default.VIEW_MIN_SCALE), U = new M.L2DMatrix44, U.multScale(1, i / e), G = new M.L2DMatrix44, G.multTranslate(-i / 2, -e / 2), G.multScale(2 / i, -2 / i), F = v(), (0, D.setContext)(F), !F) return console.error("Failed to create WebGL context."), void(window.WebGLRenderingContext && console.error("Your browser don't support WebGL, check https://get.webgl.org/ for futher information.")); - window.Live2D.setGL(F), F.clearColor(0, 0, 0, 0), a(t), s() - } - function s() { - b || (b = !0, function t() { - _(); - var i = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame; - if (window.Live2D.captureFrame) { - window.Live2D.captureFrame = !1; - var e = document.createElement("a"); - document.body.appendChild(e), e.setAttribute("type", "hidden"), e.href = C.toDataURL(), e.download = window.Live2D.captureName || "live2d.png", e.click() - } - i(t, C) - }()) - } - function _() { - O. - default.reset(), O. - default.loadIdentity(), N.update(), R.setDrag(N.getX(), N.getY()), F.clear(F.COLOR_BUFFER_BIT), O. - default.multMatrix(U.getArray()), O. - default.multMatrix(B.getArray()), O. - default.push(); - for (var t = 0; t < R.numModels(); t++) { - var i = R.getModel(t); - if (null == i) return; - i.initialized && !i.updating && (i.update(), i.draw(F)) - } - O. 
- default.pop() - } - function a(t) { - R.reloadFlg = !0, R.count++, R.changeModel(F, t) - } - function h(t, i) { - return t.x * i.x + t.y * i.y - } - function l(t, i) { - var e = Math.sqrt(t * t + i * i); - return { - x: t / e, - y: i / e - } - } - function $(t, i, e) { - function r(t, i) { - return 180 * Math.acos(h({ - x: 0, - y: 1 - }, l(t, i))) / Math.PI - } - if (i.x < e.left + e.width && i.y < e.top + e.height && i.x > e.left && i.y > e.top) return i; - var o = t.x - i.x, - n = t.y - i.y, - s = r(o, n); - i.x < t.x && (s = 360 - s); - var _ = 360 - r(e.left - t.x, -1 * (e.top - t.y)), - a = 360 - r(e.left - t.x, -1 * (e.top + e.height - t.y)), - $ = r(e.left + e.width - t.x, -1 * (e.top - t.y)), - u = r(e.left + e.width - t.x, -1 * (e.top + e.height - t.y)), - p = n / o, - f = {}; - if (s < $) { - var c = e.top - t.y, - d = c / p; - f = { - y: t.y + c, - x: t.x + d - } - } else if (s < u) { - var g = e.left + e.width - t.x, - y = g * p; - f = { - y: t.y + y, - x: t.x + g - } - } else if (s < a) { - var m = e.top + e.height - t.y, - T = m / p; - f = { - y: t.y + m, - x: t.x + T - } - } else if (s < _) { - var P = t.x - e.left, - S = P * p; - f = { - y: t.y - S, - x: t.x - P - } - } else { - var v = e.top - t.y, - L = v / p; - f = { - y: t.y + v, - x: t.x + L - } - } - return f - } - function u(t) { - Y = !0; - var i = C.getBoundingClientRect(), - e = P(t.clientX - i.left), - r = S(t.clientY - i.top), - o = $({ - x: i.left + i.width / 2, - y: i.top + i.height * X - }, { - x: t.clientX, - y: t.clientY - }, i), - n = m(o.x - i.left), - s = T(o.y - i.top); - w. - default.DEBUG_MOUSE_LOG && console.log("onMouseMove device( x:" + t.clientX + " y:" + t.clientY + " ) view( x:" + n + " y:" + s + ")"), k = e, V = r, N.setPoint(n, s) - } - function p(t) { - Y = !0; - var i = C.getBoundingClientRect(), - e = P(t.clientX - i.left), - r = S(t.clientY - i.top), - o = $({ - x: i.left + i.width / 2, - y: i.top + i.height * X - }, { - x: t.clientX, - y: t.clientY - }, i), - n = m(o.x - i.left), - s = T(o.y - i.top); - w. - default.DEBUG_MOUSE_LOG && console.log("onMouseDown device( x:" + t.clientX + " y:" + t.clientY + " ) view( x:" + n + " y:" + s + ")"), k = e, V = r, R.tapEvent(n, s) - } - function f(t) { - var i = C.getBoundingClientRect(), - e = P(t.clientX - i.left), - r = S(t.clientY - i.top), - o = $({ - x: i.left + i.width / 2, - y: i.top + i.height * X - }, { - x: t.clientX, - y: t.clientY - }, i), - n = m(o.x - i.left), - s = T(o.y - i.top); - w. - default.DEBUG_MOUSE_LOG && console.log("onMouseMove device( x:" + t.clientX + " y:" + t.clientY + " ) view( x:" + n + " y:" + s + ")"), Y && (k = e, V = r, N.setPoint(n, s)) - } - function c() { - Y && (Y = !1), N.setPoint(0, 0) - } - function d() { - w. - default.DEBUG_LOG && console.log("Set Session Storage."), sessionStorage.setItem("Sleepy", "1") - } - function g(t) { - if ("mousewheel" == t.type); - else if ("mousedown" == t.type) p(t); - else if ("mousemove" == t.type) { - var i = sessionStorage.getItem("Sleepy"); - "1" === i && sessionStorage.setItem("Sleepy", "0"), u(t) - } else if ("mouseup" == t.type) { - if ("button" in t && 0 != t.button) return - } else if ("mouseout" == t.type) { - w. - default.DEBUG_LOG && console.log("Mouse out Window."), c(); - var e = sessionStorage.getItem("SleepyTimer"); - window.clearTimeout(e), e = window.setTimeout(d, 5e4), sessionStorage.setItem("SleepyTimer", e) - } - } - function y(t) { - var i = t.touches[0]; - "touchstart" == t.type ? 1 == t.touches.length && u(i) : "touchmove" == t.type ? 
f(i) : "touchend" == t.type && c() - } - function m(t) { - var i = G.transformX(t); - return B.invertTransformX(i) - } - function T(t) { - var i = G.transformY(t); - return B.invertTransformY(i) - } - function P(t) { - return G.transformX(t) - } - function S(t) { - return G.transformY(t) - } - function v() { - for (var t = ["webgl", "experimental-webgl", "webkit-3d", "moz-webgl"], i = 0; i < t.length; i++) try { - var e = C.getContext(t[i], { - premultipliedAlpha: !0 - }); - if (e) return e - } catch (t) {} - return null - } - function L(t, i, e) { - X = void 0 === e ? .5 : e, o(t), n(i) - } - e(6); - var M = e(0), - E = e(8), - A = r(E), - I = e(1), - w = r(I), - x = e(3), - O = r(x), - D = e(2), - R = (window.navigator.platform.toLowerCase(), new A. - default), - b = !1, - F = null, - C = null, - N = null, - B = null, - U = null, - G = null, - Y = !1, - k = 0, - V = 0, - X = .5; - window.loadlive2d = L -}, function(t, i, e) { - "use strict"; - (function(t) { - ! - function() { - function i() { - At || (this._$MT = null, this._$5S = null, this._$NP = 0, i._$42++, this._$5S = new Y(this)) - } - function e(t) { - if (!At) { - this.clipContextList = new Array, this.glcontext = t.gl, this.dp_webgl = t, this.curFrameNo = 0, this.firstError_clipInNotUpdate = !0, this.colorBuffer = 0, this.isInitGLFBFunc = !1, this.tmpBoundsOnModel = new S, at.glContext.length > at.frameBuffers.length && (this.curFrameNo = this.getMaskRenderTexture()), this.tmpModelToViewMatrix = new R, this.tmpMatrix2 = new R, this.tmpMatrixForMask = new R, this.tmpMatrixForDraw = new R, this.CHANNEL_COLORS = new Array; - var i = new A; - i = new A, i.r = 0, i.g = 0, i.b = 0, i.a = 1, this.CHANNEL_COLORS.push(i), i = new A, i.r = 1, i.g = 0, i.b = 0, i.a = 0, this.CHANNEL_COLORS.push(i), i = new A, i.r = 0, i.g = 1, i.b = 0, i.a = 0, this.CHANNEL_COLORS.push(i), i = new A, i.r = 0, i.g = 0, i.b = 1, i.a = 0, this.CHANNEL_COLORS.push(i); - for (var e = 0; e < this.CHANNEL_COLORS.length; e++) this.dp_webgl.setChannelFlagAsColor(e, this.CHANNEL_COLORS[e]) - } - } - function r(t, i, e) { - this.clipIDList = new Array, this.clipIDList = e, this.clippingMaskDrawIndexList = new Array; - for (var r = 0; r < e.length; r++) this.clippingMaskDrawIndexList.push(i.getDrawDataIndex(e[r])); - this.clippedDrawContextList = new Array, this.isUsing = !0, this.layoutChannelNo = 0, this.layoutBounds = new S, this.allClippedDrawRect = new S, this.matrixForMask = new Float32Array(16), this.matrixForDraw = new Float32Array(16), this.owner = t - } - function o(t, i) { - this._$gP = t, this.drawDataIndex = i - } - function n() { - At || (this.color = null) - } - function s() { - At || (this._$dP = null, this._$eo = null, this._$V0 = null, this._$dP = 1e3, this._$eo = 1e3, this._$V0 = 1, this._$a0()) - } - function _() {} - function a() { - this._$r = null, this._$0S = null - } - function h() { - At || (this.x = null, this.y = null, this.width = null, this.height = null) - } - function l(t) { - At || et.prototype.constructor.call(this, t) - } - function $() {} - function u(t) { - At || et.prototype.constructor.call(this, t) - } - function p() { - At || (this._$vo = null, this._$F2 = null, this._$ao = 400, this._$1S = 400, p._$42++) - } - function f() { - At || (this.p1 = new c, this.p2 = new c, this._$Fo = 0, this._$Db = 0, this._$L2 = 0, this._$M2 = 0, this._$ks = 0, this._$9b = 0, this._$iP = 0, this._$iT = 0, this._$lL = new Array, this._$qP = new Array, this.setup(.3, .5, .1)) - } - function c() { - this._$p = 1, this.x = 0, this.y = 0, this.vx = 0, 
this.vy = 0, this.ax = 0, this.ay = 0, this.fx = 0, this.fy = 0, this._$s0 = 0, this._$70 = 0, this._$7L = 0, this._$HL = 0 - } - function d(t, i, e) { - this._$wL = null, this.scale = null, this._$V0 = null, this._$wL = t, this.scale = i, this._$V0 = e - } - function g(t, i, e, r) { - d.prototype.constructor.call(this, i, e, r), this._$tL = null, this._$tL = t - } - function y(t, i, e) { - this._$wL = null, this.scale = null, this._$V0 = null, this._$wL = t, this.scale = i, this._$V0 = e - } - function T(t, i, e, r) { - y.prototype.constructor.call(this, i, e, r), this._$YP = null, this._$YP = t - } - function P() { - At || (this._$fL = 0, this._$gL = 0, this._$B0 = 1, this._$z0 = 1, this._$qT = 0, this.reflectX = !1, this.reflectY = !1) - } - function S() { - At || (this.x = null, this.y = null, this.width = null, this.height = null) - } - function v() {} - function L() { - At || (this.x = null, this.y = null) - } - function M() { - At || (this._$gP = null, this._$dr = null, this._$GS = null, this._$qb = null, this._$Lb = null, this._$mS = null, this.clipID = null, this.clipIDList = new Array) - } - function E() { - At || (this._$Eb = E._$ps, this._$lT = 1, this._$C0 = 1, this._$tT = 1, this._$WL = 1, this.culling = !1, this.matrix4x4 = new Float32Array(16), this.premultipliedAlpha = !1, this.anisotropy = 0, this.clippingProcess = E.CLIPPING_PROCESS_NONE, this.clipBufPre_clipContextMask = null, this.clipBufPre_clipContextDraw = null, this.CHANNEL_COLORS = new Array) - } - function A() { - At || (this.a = 1, this.r = 1, this.g = 1, this.b = 1, this.scale = 1, this._$ho = 1, this.blendMode = at.L2D_COLOR_BLEND_MODE_MULT) - } - function I() { - At || (this._$kP = null, this._$dr = null, this._$Ai = !0, this._$mS = null) - } - function w() {} - function x() { - At || (this._$VP = 0, this._$wL = null, this._$GP = null, this._$8o = x._$ds, this._$2r = -1, this._$O2 = 0, this._$ri = 0) - } - function O() {} - function D() { - At || (this._$Ob = null) - } - function R() { - this.m = new Float32Array(16), this.identity() - } - function b(t) { - At || et.prototype.constructor.call(this, t) - } - function F() { - At || (this._$7 = 1, this._$f = 0, this._$H = 0, this._$g = 1, this._$k = 0, this._$w = 0, this._$hi = STATE_IDENTITY, this._$Z = _$pS) - } - function C() { - At || (s.prototype.constructor.call(this), this.motions = new Array, this._$7r = null, this._$7r = C._$Co++, this._$D0 = 30, this._$yT = 0, this._$E = !0, this.loopFadeIn = !0, this._$AS = -1, _$a0()) - } - function N() { - this._$P = new Float32Array(100), this.size = 0 - } - function B() { - this._$4P = null, this._$I0 = null, this._$RP = null - } - function U() {} - function G() {} - function Y(t) { - At || (this._$QT = !0, this._$co = -1, this._$qo = 0, this._$pb = new Array(Y._$is), this._$_2 = new Float32Array(Y._$is), this._$vr = new Float32Array(Y._$is), this._$Rr = new Float32Array(Y._$is), this._$Or = new Float32Array(Y._$is), this._$fs = new Float32Array(Y._$is), this._$Js = new Array(Y._$is), this._$3S = new Array, this._$aS = new Array, this._$Bo = null, this._$F2 = new Array, this._$db = new Array, this._$8b = new Array, this._$Hr = new Array, this._$Ws = null, this._$Vs = null, this._$Er = null, this._$Es = new Int16Array(U._$Qb), this._$ZP = new Float32Array(2 * U._$1r), this._$Ri = t, this._$b0 = Y._$HP++, this.clipManager = null, this.dp_webgl = null) - } - function k() {} - function V() { - At || (this._$12 = null, this._$bb = null, this._$_L = null, this._$jo = null, this._$iL = null, this._$0L = null, this._$Br = 
null, this._$Dr = null, this._$Cb = null, this._$mr = null, this._$_L = wt.STATE_FIRST, this._$Br = 4e3, this._$Dr = 100, this._$Cb = 50, this._$mr = 150, this._$jo = !0, this._$iL = "PARAM_EYE_L_OPEN", this._$0L = "PARAM_EYE_R_OPEN") - } - function X() { - At || (E.prototype.constructor.call(this), this._$sb = new Int32Array(X._$As), this._$U2 = new Array, this.transform = null, this.gl = null, null == X._$NT && (X._$NT = X._$9r(256), X._$vS = X._$9r(256), X._$no = X._$vb(256))) - } - function z() { - At || (I.prototype.constructor.call(this), this._$GS = null, this._$Y0 = null) - } - function H(t) { - _t.prototype.constructor.call(this, t), this._$8r = I._$ur, this._$Yr = null, this._$Wr = null - } - function W() { - At || (M.prototype.constructor.call(this), this._$gP = null, this._$dr = null, this._$GS = null, this._$qb = null, this._$Lb = null, this._$mS = null) - } - function j() { - At || (this._$NL = null, this._$3S = null, this._$aS = null, j._$42++) - } - function q() { - At || (i.prototype.constructor.call(this), this._$zo = new X) - } - function J() { - At || (s.prototype.constructor.call(this), this.motions = new Array, this._$o2 = null, this._$7r = J._$Co++, this._$D0 = 30, this._$yT = 0, this._$E = !1, this.loopFadeIn = !0, this._$rr = -1, this._$eP = 0) - } - function Q(t, i) { - return String.fromCharCode(t.getUint8(i)) - } - function N() { - this._$P = new Float32Array(100), this.size = 0 - } - function B() { - this._$4P = null, this._$I0 = null, this._$RP = null - } - function Z() { - At || (I.prototype.constructor.call(this), this._$o = 0, this._$A = 0, this._$GS = null, this._$Eo = null) - } - function K(t) { - _t.prototype.constructor.call(this, t), this._$8r = I._$ur, this._$Cr = null, this._$hr = null - } - function tt() { - At || (this.visible = !0, this._$g0 = !1, this._$NL = null, this._$3S = null, this._$aS = null, tt._$42++) - } - function it(t) { - this._$VS = null, this._$e0 = null, this._$e0 = t - } - function et(t) { - At || (this.id = t) - } - function rt() {} - function ot() { - At || (this._$4S = null) - } - function nt(t, i) { - this.canvas = t, this.context = i, this.viewport = new Array(0, 0, t.width, t.height), this._$6r = 1, this._$xP = 0, this._$3r = 1, this._$uP = 0, this._$Qo = -1, this.cacheImages = {} - } - function st() { - At || (this._$TT = null, this._$LT = null, this._$FS = null, this._$wL = null) - } - function _t(t) { - At || (this._$e0 = null, this._$IP = null, this._$JS = !1, this._$AT = !0, this._$e0 = t, this.totalScale = 1, this._$7s = 1, this.totalOpacity = 1) - } - function at() {} - function ht() {} - function lt(t) { - At || (this._$ib = t) - } - function $t() { - At || (W.prototype.constructor.call(this), this._$LP = -1, this._$d0 = 0, this._$Yo = 0, this._$JP = null, this._$5P = null, this._$BP = null, this._$Eo = null, this._$Qi = null, this._$6s = $t._$ms, this.culling = !0, this.gl_cacheImage = null, this.instanceNo = $t._$42++) - } - function ut(t) { - Mt.prototype.constructor.call(this, t), this._$8r = W._$ur, this._$Cr = null, this._$hr = null - } - function pt() { - At || (this.x = null, this.y = null) - } - function ft(t) { - At || (i.prototype.constructor.call(this), this.drawParamWebGL = new mt(t), this.drawParamWebGL.setGL(at.getGL(t))) - } - function ct() { - At || (this.motions = null, this._$eb = !1, this.motions = new Array) - } - function dt() { - this._$w0 = null, this._$AT = !0, this._$9L = !1, this._$z2 = -1, this._$bs = -1, this._$Do = -1, this._$sr = null, this._$sr = dt._$Gs++ - } - function gt() { - 
this.m = new Array(1, 0, 0, 0, 1, 0, 0, 0, 1) - } - function yt(t) { - At || et.prototype.constructor.call(this, t) - } - function mt(t) { - At || (E.prototype.constructor.call(this), this.textures = new Array, this.transform = null, this.gl = null, this.glno = t, this.firstDraw = !0, this.anisotropyExt = null, this.maxAnisotropy = 0, this._$As = 32, this._$Gr = !1, this._$NT = null, this._$vS = null, this._$no = null, this.vertShader = null, this.fragShader = null, this.vertShaderOff = null, this.fragShaderOff = null) - } - function Tt(t, i, e) { - return null == i && (i = t.createBuffer()), t.bindBuffer(t.ARRAY_BUFFER, i), t.bufferData(t.ARRAY_BUFFER, e, t.DYNAMIC_DRAW), i - } - function Pt(t, i, e) { - return null == i && (i = t.createBuffer()), t.bindBuffer(t.ELEMENT_ARRAY_BUFFER, i), t.bufferData(t.ELEMENT_ARRAY_BUFFER, e, t.DYNAMIC_DRAW), i - } - function St(t) { - At || (this._$P = new Int8Array(8), this._$R0 = new DataView(this._$P.buffer), this._$3i = new Int8Array(1e3), this._$hL = 0, this._$v0 = 0, this._$S2 = 0, this._$Ko = new Array, this._$T = t, this._$F = 0) - } - function vt() {} - function Lt() {} - function Mt(t) { - At || (this._$e0 = null, this._$IP = null, this._$Us = null, this._$7s = null, this._$IS = [!1], this._$VS = null, this._$AT = !0, this.baseOpacity = 1, this.clipBufPre_clipContext = null, this._$e0 = t) - } - function Et() {} - var At = !0; - i._$0s = 1, i._$4s = 2, i._$42 = 0, i._$62 = function(t, e) { - try { - if (e instanceof ArrayBuffer && (e = new DataView(e)), !(e instanceof DataView)) throw new lt("_$SS#loadModel(b) / b _$x be DataView or ArrayBuffer"); - var r, o = new St(e), - n = o._$ST(), - s = o._$ST(), - a = o._$ST(); - if (109 != n || 111 != s || 99 != a) throw new lt("_$gi _$C _$li , _$Q0 _$P0."); - if (r = o._$ST(), o._$gr(r), r > G._$T7) { - t._$NP |= i._$4s; - throw new lt("_$gi _$C _$li , _$n0 _$_ version _$li ( SDK : " + G._$T7 + " < _$f0 : " + r + " )@_$SS#loadModel()\n") - } - var h = o._$nP(); - if (r >= G._$s7) { - var l = o._$9T(), - $ = o._$9T(); - if (-30584 != l || -30584 != $) throw t._$NP |= i._$0s, new lt("_$gi _$C _$li , _$0 _$6 _$Ui.") - } - t._$KS(h); - var u = t.getModelContext(); - u.setDrawParam(t.getDrawParam()), u.init() - } catch (t) { - _._$Rb(t) - } - }, i.prototype._$KS = function(t) { - this._$MT = t - }, i.prototype.getModelImpl = function() { - return null == this._$MT && (this._$MT = new p, this._$MT._$zP()), this._$MT - }, i.prototype.getCanvasWidth = function() { - return null == this._$MT ? 0 : this._$MT.getCanvasWidth() - }, i.prototype.getCanvasHeight = function() { - return null == this._$MT ? 
0 : this._$MT.getCanvasHeight() - }, i.prototype.getParamFloat = function(t) { - return "number" != typeof t && (t = this._$5S.getParamIndex(u.getID(t))), this._$5S.getParamFloat(t) - }, i.prototype.setParamFloat = function(t, i, e) { - "number" != typeof t && (t = this._$5S.getParamIndex(u.getID(t))), arguments.length < 3 && (e = 1), this._$5S.setParamFloat(t, this._$5S.getParamFloat(t) * (1 - e) + i * e) - }, i.prototype.addToParamFloat = function(t, i, e) { - "number" != typeof t && (t = this._$5S.getParamIndex(u.getID(t))), arguments.length < 3 && (e = 1), this._$5S.setParamFloat(t, this._$5S.getParamFloat(t) + i * e) - }, i.prototype.multParamFloat = function(t, i, e) { - "number" != typeof t && (t = this._$5S.getParamIndex(u.getID(t))), arguments.length < 3 && (e = 1), this._$5S.setParamFloat(t, this._$5S.getParamFloat(t) * (1 + (i - 1) * e)) - }, i.prototype.getParamIndex = function(t) { - return this._$5S.getParamIndex(u.getID(t)) - }, i.prototype.loadParam = function() { - this._$5S.loadParam() - }, i.prototype.saveParam = function() { - this._$5S.saveParam() - }, i.prototype.init = function() { - this._$5S.init() - }, i.prototype.update = function() { - this._$5S.update() - }, i.prototype._$Rs = function() { - return _._$li("_$60 _$PT _$Rs()"), -1 - }, i.prototype._$Ds = function(t) { - _._$li("_$60 _$PT _$SS#_$Ds() \n") - }, i.prototype._$K2 = function() {}, i.prototype.draw = function() {}, i.prototype.getModelContext = function() { - return this._$5S - }, i.prototype._$s2 = function() { - return this._$NP - }, i.prototype._$P7 = function(t, i, e, r) { - var o = -1, - n = 0, - s = this; - if (0 != e) if (1 == t.length) { - var _ = t[0], - a = 0 != s.getParamFloat(_), - h = i[0], - l = s.getPartsOpacity(h), - $ = e / r; - a ? (l += $) > 1 && (l = 1) : (l -= $) < 0 && (l = 0), s.setPartsOpacity(h, l) - } else { - for (var u = 0; u < t.length; u++) { - var _ = t[u], - p = 0 != s.getParamFloat(_); - if (p) { - if (o >= 0) break; - o = u; - var h = i[u]; - n = s.getPartsOpacity(h), n += e / r, n > 1 && (n = 1) - } - } - o < 0 && (console.log("No _$wi _$q0/ _$U default[%s]", t[0]), o = 0, n = 1, s.loadParam(), s.setParamFloat(t[o], n), s.saveParam()); - for (var u = 0; u < t.length; u++) { - var h = i[u]; - if (o == u) s.setPartsOpacity(h, n); - else { - var f, c = s.getPartsOpacity(h); - f = n < .5 ? -.5 * n / .5 + 1 : .5 * (1 - n) / .5; - var d = (1 - f) * (1 - n); - d > .15 && (f = 1 - .15 / (1 - n)), c > f && (c = f), s.setPartsOpacity(h, c) - } - } - } else for (var u = 0; u < t.length; u++) { - var _ = t[u], - h = i[u], - p = 0 != s.getParamFloat(_); - s.setPartsOpacity(h, p ? 1 : 0) - } - }, i.prototype.setPartsOpacity = function(t, i) { - "number" != typeof t && (t = this._$5S.getPartsDataIndex(l.getID(t))), this._$5S.setPartsOpacity(t, i) - }, i.prototype.getPartsDataIndex = function(t) { - return t instanceof l || (t = l.getID(t)), this._$5S.getPartsDataIndex(t) - }, i.prototype.getPartsOpacity = function(t) { - return "number" != typeof t && (t = this._$5S.getPartsDataIndex(l.getID(t))), t < 0 ? 0 : this._$5S.getPartsOpacity(t) - }, i.prototype.getDrawParam = function() {}, i.prototype.getDrawDataIndex = function(t) { - return this._$5S.getDrawDataIndex(b.getID(t)) - }, i.prototype.getDrawData = function(t) { - return this._$5S.getDrawData(t) - }, i.prototype.getTransformedPoints = function(t) { - var i = this._$5S._$C2(t); - return i instanceof ut ? 
i.getTransformedPoints() : null - }, i.prototype.getIndexArray = function(t) { - if (t < 0 || t >= this._$5S._$aS.length) return null; - var i = this._$5S._$aS[t]; - return null != i && i.getType() == W._$wb && i instanceof $t ? i.getIndexArray() : null - }, e.CHANNEL_COUNT = 4, e.RENDER_TEXTURE_USE_MIPMAP = !1, e.NOT_USED_FRAME = -100, e.prototype._$L7 = function() { - if (this.tmpModelToViewMatrix && (this.tmpModelToViewMatrix = null), this.tmpMatrix2 && (this.tmpMatrix2 = null), this.tmpMatrixForMask && (this.tmpMatrixForMask = null), this.tmpMatrixForDraw && (this.tmpMatrixForDraw = null), this.tmpBoundsOnModel && (this.tmpBoundsOnModel = null), this.CHANNEL_COLORS) { - for (var t = this.CHANNEL_COLORS.length - 1; t >= 0; --t) this.CHANNEL_COLORS.splice(t, 1); - this.CHANNEL_COLORS = [] - } - this.releaseShader() - }, e.prototype.releaseShader = function() { - for (var t = at.frameBuffers.length, i = 0; i < t; i++) this.gl.deleteFramebuffer(at.frameBuffers[i].framebuffer); - at.frameBuffers = [], at.glContext = [] - }, e.prototype.init = function(t, i, e) { - for (var o = 0; o < i.length; o++) { - var n = i[o].getClipIDList(); - if (null != n) { - var s = this.findSameClip(n); - null == s && (s = new r(this, t, n), this.clipContextList.push(s)); - var _ = i[o].getDrawDataID(), - a = t.getDrawDataIndex(_); - s.addClippedDrawData(_, a); - e[o].clipBufPre_clipContext = s - } - } - }, e.prototype.getMaskRenderTexture = function() { - var t = null; - return t = this.dp_webgl.createFramebuffer(), at.frameBuffers[this.dp_webgl.glno] = t, this.dp_webgl.glno - }, e.prototype.setupClip = function(t, i) { - for (var e = 0, r = 0; r < this.clipContextList.length; r++) { - var o = this.clipContextList[r]; - this.calcClippedDrawTotalBounds(t, o), o.isUsing && e++ - } - if (e > 0) { - var n = i.gl.getParameter(i.gl.FRAMEBUFFER_BINDING), - s = new Array(4); - s[0] = 0, s[1] = 0, s[2] = i.gl.canvas.width, s[3] = i.gl.canvas.height, i.gl.viewport(0, 0, at.clippingMaskBufferSize, at.clippingMaskBufferSize), this.setupLayoutBounds(e), i.gl.bindFramebuffer(i.gl.FRAMEBUFFER, at.frameBuffers[this.curFrameNo].framebuffer), i.gl.clearColor(0, 0, 0, 0), i.gl.clear(i.gl.COLOR_BUFFER_BIT); - for (var r = 0; r < this.clipContextList.length; r++) { - var o = this.clipContextList[r], - _ = o.allClippedDrawRect, - a = (o.layoutChannelNo, o.layoutBounds); - this.tmpBoundsOnModel._$jL(_), this.tmpBoundsOnModel.expand(.05 * _.width, .05 * _.height); - var h = a.width / this.tmpBoundsOnModel.width, - l = a.height / this.tmpBoundsOnModel.height; - this.tmpMatrix2.identity(), this.tmpMatrix2.translate(-1, -1, 0), this.tmpMatrix2.scale(2, 2, 1), this.tmpMatrix2.translate(a.x, a.y, 0), this.tmpMatrix2.scale(h, l, 1), this.tmpMatrix2.translate(-this.tmpBoundsOnModel.x, -this.tmpBoundsOnModel.y, 0), this.tmpMatrixForMask.setMatrix(this.tmpMatrix2.m), this.tmpMatrix2.identity(), this.tmpMatrix2.translate(a.x, a.y, 0), this.tmpMatrix2.scale(h, l, 1), this.tmpMatrix2.translate(-this.tmpBoundsOnModel.x, -this.tmpBoundsOnModel.y, 0), this.tmpMatrixForDraw.setMatrix(this.tmpMatrix2.m); - for (var $ = this.tmpMatrixForMask.getArray(), u = 0; u < 16; u++) o.matrixForMask[u] = $[u]; - for (var p = this.tmpMatrixForDraw.getArray(), u = 0; u < 16; u++) o.matrixForDraw[u] = p[u]; - for (var f = o.clippingMaskDrawIndexList.length, c = 0; c < f; c++) { - var d = o.clippingMaskDrawIndexList[c], - g = t.getDrawData(d), - y = t._$C2(d); - i.setClipBufPre_clipContextForMask(o), g.draw(i, t, y) - } - } - i.gl.bindFramebuffer(i.gl.FRAMEBUFFER, 
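/*
 * setupClip above is the per-frame mask prepass: it computes each clip
 * context's union bounds, remembers the currently bound framebuffer,
 * switches the viewport to the square clipping-mask buffer, and renders
 * every mask shape into its assigned layout cell. The two matrices it
 * builds differ only by the leading translate(-1,-1)/scale(2,2) prefix:
 * matrixForMask maps the (slightly expanded, +5% margin) model-space
 * bounds into the cell in clip space for rendering the mask, while
 * matrixForDraw omits the NDC step so the clipped drawable can later read
 * the same cell back as [0,1] texture coordinates. Afterwards the saved
 * framebuffer and a canvas-sized viewport are restored.
 */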
n), i.setClipBufPre_clipContextForMask(null), i.gl.viewport(s[0], s[1], s[2], s[3]) - } - }, e.prototype.getColorBuffer = function() { - return this.colorBuffer - }, e.prototype.findSameClip = function(t) { - for (var i = 0; i < this.clipContextList.length; i++) { - var e = this.clipContextList[i], - r = e.clipIDList.length; - if (r == t.length) { - for (var o = 0, n = 0; n < r; n++) for (var s = e.clipIDList[n], _ = 0; _ < r; _++) if (t[_] == s) { - o++; - break - } - if (o == r) return e - } - } - return null - }, e.prototype.calcClippedDrawTotalBounds = function(t, i) { - for (var e = t._$Ri.getModelImpl().getCanvasWidth(), r = t._$Ri.getModelImpl().getCanvasHeight(), o = e > r ? e : r, n = o, s = o, _ = 0, a = 0, h = i.clippedDrawContextList.length, l = 0; l < h; l++) { - var $ = i.clippedDrawContextList[l], - u = $.drawDataIndex, - p = t._$C2(u); - if (p._$yo()) { - for (var f = p.getTransformedPoints(), c = f.length, d = [], g = [], y = 0, m = U._$i2; m < c; m += U._$No) d[y] = f[m], g[y] = f[m + 1], y++; - var T = Math.min.apply(null, d), - P = Math.min.apply(null, g), - S = Math.max.apply(null, d), - v = Math.max.apply(null, g); - T < n && (n = T), P < s && (s = P), S > _ && (_ = S), v > a && (a = v) - } - } - if (n == o) i.allClippedDrawRect.x = 0, i.allClippedDrawRect.y = 0, i.allClippedDrawRect.width = 0, i.allClippedDrawRect.height = 0, i.isUsing = !1; - else { - var L = _ - n, - M = a - s; - i.allClippedDrawRect.x = n, i.allClippedDrawRect.y = s, i.allClippedDrawRect.width = L, i.allClippedDrawRect.height = M, i.isUsing = !0 - } - }, e.prototype.setupLayoutBounds = function(t) { - var i = t / e.CHANNEL_COUNT, - r = t % e.CHANNEL_COUNT; - i = ~~i, r = ~~r; - for (var o = 0, n = 0; n < e.CHANNEL_COUNT; n++) { - var s = i + (n < r ? 1 : 0); - if (0 == s); - else if (1 == s) { - var a = this.clipContextList[o++]; - a.layoutChannelNo = n, a.layoutBounds.x = 0, a.layoutBounds.y = 0, a.layoutBounds.width = 1, a.layoutBounds.height = 1 - } else if (2 == s) for (var h = 0; h < s; h++) { - var l = h % 2, - $ = 0; - l = ~~l; - var a = this.clipContextList[o++]; - a.layoutChannelNo = n, a.layoutBounds.x = .5 * l, a.layoutBounds.y = 0, a.layoutBounds.width = .5, a.layoutBounds.height = 1 - } else if (s <= 4) for (var h = 0; h < s; h++) { - var l = h % 2, - $ = h / 2; - l = ~~l, $ = ~~$; - var a = this.clipContextList[o++]; - a.layoutChannelNo = n, a.layoutBounds.x = .5 * l, a.layoutBounds.y = .5 * $, a.layoutBounds.width = .5, a.layoutBounds.height = .5 - } else if (s <= 9) for (var h = 0; h < s; h++) { - var l = h % 3, - $ = h / 3; - l = ~~l, $ = ~~$; - var a = this.clipContextList[o++]; - a.layoutChannelNo = n, a.layoutBounds.x = l / 3, a.layoutBounds.y = $ / 3, a.layoutBounds.width = 1 / 3, a.layoutBounds.height = 1 / 3 - } else _._$li("_$6 _$0P mask count : %d", s) - } - }, r.prototype.addClippedDrawData = function(t, i) { - var e = new o(t, i); - this.clippedDrawContextList.push(e) - }, s._$JT = function(t, i, e) { - var r = t / i, - o = e / i, - n = o, - s = 1 - (1 - o) * (1 - o), - _ = 1 - (1 - n) * (1 - n), - a = 1 / 3 * (1 - o) * s + (n * (2 / 3) + 1 / 3 * (1 - n)) * (1 - s), - h = (n + 2 / 3 * (1 - n)) * _ + (o * (1 / 3) + 2 / 3 * (1 - o)) * (1 - _), - l = 1 - 3 * h + 3 * a - 0, - $ = 3 * h - 6 * a + 0, - u = 3 * a - 0; - if (r <= 0) return 0; - if (r >= 1) return 1; - var p = r, - f = p * p; - return l * (p * f) + $ * f + u * p + 0 - }, s.prototype._$a0 = function() {}, s.prototype.setFadeIn = function(t) { - this._$dP = t - }, s.prototype.setFadeOut = function(t) { - this._$eo 
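/*
 * setupLayoutBounds above packs `t` masks into the four color channels of
 * one mask texture: each channel receives floor(t/4) masks plus one extra
 * for the first (t % 4) channels, and the channel's unit square is then
 * subdivided by count -- 1 mask takes the whole channel, 2 take half-width
 * columns, up to 4 take 2x2 quadrants, up to 9 a 3x3 grid, and more than
 * 9 per channel (36 total) only produces the error log. Worked example,
 * t = 6: floor(6/4) = 1 and 6 % 4 = 2, so the first two channels hold two
 * masks each (0.5-wide cells) and the last two hold one full-channel mask.
 */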
= t - }, s.prototype._$pT = function(t) { - this._$V0 = t - }, s.prototype.getFadeOut = function() { - return this._$eo - }, s.prototype._$4T = function() { - return this._$eo - }, s.prototype._$mT = function() { - return this._$V0 - }, s.prototype.getDurationMSec = function() { - return -1 - }, s.prototype.getLoopDurationMSec = function() { - return -1 - }, s.prototype.updateParam = function(t, i) { - if (i._$AT && !i._$9L) { - var e = w.getUserTimeMSec(); - if (i._$z2 < 0) { - i._$z2 = e, i._$bs = e; - var r = this.getDurationMSec(); - i._$Do < 0 && (i._$Do = r <= 0 ? -1 : i._$z2 + r) - } - var o = this._$V0; - o = o * (0 == this._$dP ? 1 : ht._$r2((e - i._$bs) / this._$dP)) * (0 == this._$eo || i._$Do < 0 ? 1 : ht._$r2((i._$Do - e) / this._$eo)), 0 <= o && o <= 1 || console.log("### assert!! ### "), this.updateParamExe(t, e, o, i), i._$Do > 0 && i._$Do < e && (i._$9L = !0) - } - }, s.prototype.updateParamExe = function(t, i, e, r) {}, _._$8s = 0, _._$fT = new Object, _.start = function(t) { - var i = _._$fT[t]; - null == i && (i = new a, i._$r = t, _._$fT[t] = i), i._$0S = w.getSystemTimeMSec() - }, _.dump = function(t) { - var i = _._$fT[t]; - if (null != i) { - var e = w.getSystemTimeMSec(), - r = e - i._$0S; - return console.log(t + " : " + r + "ms"), r - } - return -1 - }, _.end = function(t) { - var i = _._$fT[t]; - if (null != i) { - return w.getSystemTimeMSec() - i._$0S - } - return -1 - }, _._$li = function(t, i) { - console.log("_$li : " + t + "\n", i) - }, _._$Ji = function(t, i) { - console.log(t, i) - }, _._$dL = function(t, i) { - console.log(t, i), console.log("\n") - }, _._$KL = function(t, i) { - for (var e = 0; e < i; e++) e % 16 == 0 && e > 0 ? console.log("\n") : e % 8 == 0 && e > 0 && console.log(" "), console.log("%02X ", 255 & t[e]); - console.log("\n") - }, _._$nr = function(t, i, e) { - console.log("%s\n", t); - for (var r = i.length, o = 0; o < r; ++o) console.log("%5d", i[o]), console.log("%s\n", e), console.log(","); - console.log("\n") - }, _._$Rb = function(t) { - console.log("dump exception : " + t), console.log("stack :: " + t.stack) - }, h.prototype._$8P = function() { - return .5 * (this.x + this.x + this.width) - }, h.prototype._$6P = function() { - return .5 * (this.y + this.y + this.height) - }, h.prototype._$EL = function() { - return this.x + this.width - }, h.prototype._$5T = function() { - return this.y + this.height - }, h.prototype._$jL = function(t, i, e, r) { - this.x = t, this.y = i, this.width = e, this.height = r - }, h.prototype._$jL = function(t) { - this.x = t.x, this.y = t.y, this.width = t.width, this.height = t.height - }, l.prototype = new et, l._$tP = new Object, l._$27 = function() { - l._$tP.clear() - }, l.getID = function(t) { - var i = l._$tP[t]; - return null == i && (i = new l(t), l._$tP[t] = i), i - }, l.prototype._$3s = function() { - return new l - }, u.prototype = new et, u._$tP = new Object, u._$27 = function() { - u._$tP.clear() - }, u.getID = function(t) { - var i = u._$tP[t]; - return null == i && (i = new u(t), u._$tP[t] = i), i - }, u.prototype._$3s = function() { - return new u - }, p._$42 = 0, p.prototype._$zP = function() { - null == this._$vo && (this._$vo = new ot), null == this._$F2 && (this._$F2 = new Array) - }, p.prototype.getCanvasWidth = function() { - return this._$ao - }, p.prototype.getCanvasHeight = function() { - return this._$1S - }, p.prototype._$F0 = function(t) { - this._$vo = t._$nP(), this._$F2 = t._$nP(), this._$ao = t._$6L(), this._$1S = t._$6L() - }, p.prototype._$6S = function(t) { - 
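/*
 * s above is the abstract motion base. updateParam derives the effective
 * weight as
 *   w = baseWeight * easeIn((now - startMSec) / fadeInMSec)
 *                  * easeOut((endMSec - now) / fadeOutMSec)
 * where either factor is forced to 1 when its fade time is 0 or no end
 * time is set, and the motion is flagged finished (_$9L) once `now`
 * passes the end time. ht._$r2 is presumably the easing curve; the cubic
 * in s._$JT evaluates a Bezier-style ramp of r = t/total clamped to
 * [0,1], i.e. a smoothstep-like fade rather than a linear one.
 */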
this._$F2.push(t) - }, p.prototype._$Xr = function() { - return this._$F2 - }, p.prototype._$E2 = function() { - return this._$vo - }, f.prototype.setup = function(t, i, e) { - this._$ks = this._$Yb(), this.p2._$xT(), 3 == arguments.length && (this._$Fo = t, this._$L2 = i, this.p1._$p = e, this.p2._$p = e, this.p2.y = t, this.setup()) - }, f.prototype.getPhysicsPoint1 = function() { - return this.p1 - }, f.prototype.getPhysicsPoint2 = function() { - return this.p2 - }, f.prototype._$qr = function() { - return this._$Db - }, f.prototype._$pr = function(t) { - this._$Db = t - }, f.prototype._$5r = function() { - return this._$M2 - }, f.prototype._$Cs = function() { - return this._$9b - }, f.prototype._$Yb = function() { - return -180 * Math.atan2(this.p1.x - this.p2.x, -(this.p1.y - this.p2.y)) / Math.PI - }, f.prototype.addSrcParam = function(t, i, e, r) { - var o = new g(t, i, e, r); - this._$lL.push(o) - }, f.prototype.addTargetParam = function(t, i, e, r) { - var o = new T(t, i, e, r); - this._$qP.push(o) - }, f.prototype.update = function(t, i) { - if (0 == this._$iP) return this._$iP = this._$iT = i, void(this._$Fo = Math.sqrt((this.p1.x - this.p2.x) * (this.p1.x - this.p2.x) + (this.p1.y - this.p2.y) * (this.p1.y - this.p2.y))); - var e = (i - this._$iT) / 1e3; - if (0 != e) { - for (var r = this._$lL.length - 1; r >= 0; --r) { - this._$lL[r]._$oP(t, this) - } - this._$oo(t, e), this._$M2 = this._$Yb(), this._$9b = (this._$M2 - this._$ks) / e, this._$ks = this._$M2 - } - for (var r = this._$qP.length - 1; r >= 0; --r) { - this._$qP[r]._$YS(t, this) - } - this._$iT = i - }, f.prototype._$oo = function(t, i) { - i < .033 && (i = .033); - var e = 1 / i; - this.p1.vx = (this.p1.x - this.p1._$s0) * e, this.p1.vy = (this.p1.y - this.p1._$70) * e, this.p1.ax = (this.p1.vx - this.p1._$7L) * e, this.p1.ay = (this.p1.vy - this.p1._$HL) * e, this.p1.fx = this.p1.ax * this.p1._$p, this.p1.fy = this.p1.ay * this.p1._$p, this.p1._$xT(); - var r, o, n = -Math.atan2(this.p1.y - this.p2.y, this.p1.x - this.p2.x), - s = Math.cos(n), - _ = Math.sin(n), - a = 9.8 * this.p2._$p, - h = this._$Db * Lt._$bS, - l = a * Math.cos(n - h); - r = l * _, o = l * s; - var $ = -this.p1.fx * _ * _, - u = -this.p1.fy * _ * s, - p = -this.p2.vx * this._$L2, - f = -this.p2.vy * this._$L2; - this.p2.fx = r + $ + p, this.p2.fy = o + u + f, this.p2.ax = this.p2.fx / this.p2._$p, this.p2.ay = this.p2.fy / this.p2._$p, this.p2.vx += this.p2.ax * i, this.p2.vy += this.p2.ay * i, this.p2.x += this.p2.vx * i, this.p2.y += this.p2.vy * i; - var c = Math.sqrt((this.p1.x - this.p2.x) * (this.p1.x - this.p2.x) + (this.p1.y - this.p2.y) * (this.p1.y - this.p2.y)); - this.p2.x = this.p1.x + this._$Fo * (this.p2.x - this.p1.x) / c, this.p2.y = this.p1.y + this._$Fo * (this.p2.y - this.p1.y) / c, this.p2.vx = (this.p2.x - this.p2._$s0) * e, this.p2.vy = (this.p2.y - this.p2._$70) * e, this.p2._$xT() - }, c.prototype._$xT = function() { - this._$s0 = this.x, this._$70 = this.y, this._$7L = this.vx, this._$HL = this.vy - }, d.prototype._$oP = function(t, i) {}, g.prototype = new d, g.prototype._$oP = function(t, i) { - var e = this.scale * t.getParamFloat(this._$wL), - r = i.getPhysicsPoint1(); - switch (this._$tL) { - default: - case f.Src.SRC_TO_X: - r.x = r.x + (e - r.x) * this._$V0; - break; - case f.Src.SRC_TO_Y: - r.y = r.y + (e - r.y) * this._$V0; - break; - case f.Src.SRC_TO_G_ANGLE: - var o = i._$qr(); - o += (e - o) * this._$V0, i._$pr(o) - } - }, y.prototype._$YS = function(t, i) {}, T.prototype = new y, T.prototype._$YS = 
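/*
 * f._$oo above is one explicit-Euler step of the damped pendulum used for
 * hair/accessory physics: p1 is the anchor driven by the model, p2 the
 * bob. Per step it (1) clamps dt up to at least 0.033 s, bounding the
 * 1/dt factor used to estimate velocities and accelerations from position
 * deltas; (2) sums forces on p2 -- the gravity component 9.8 * mass
 * rotated by the pendulum angle (offset by _$Db, presumably a wind or
 * gravity-direction angle in degrees, Lt._$bS being deg->rad), a reaction
 * term from p1's acceleration projected through the same angle, and a
 * velocity drag -v * _$L2; (3) integrates velocity and position; and
 * (4) re-projects p2 onto the circle of radius _$Fo around p1, restoring
 * the rod-length constraint that integration violated.
 */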
function(t, i) { - switch (this._$YP) { - default: - case f.Target.TARGET_FROM_ANGLE: - t.setParamFloat(this._$wL, this.scale * i._$5r(), this._$V0); - break; - case f.Target.TARGET_FROM_ANGLE_V: - t.setParamFloat(this._$wL, this.scale * i._$Cs(), this._$V0) - } - }, f.Src = function() {}, f.Src.SRC_TO_X = "SRC_TO_X", f.Src.SRC_TO_Y = "SRC_TO_Y", f.Src.SRC_TO_G_ANGLE = "SRC_TO_G_ANGLE", f.Target = function() {}, f.Target.TARGET_FROM_ANGLE = "TARGET_FROM_ANGLE", f.Target.TARGET_FROM_ANGLE_V = "TARGET_FROM_ANGLE_V", P.prototype.init = function(t) { - this._$fL = t._$fL, this._$gL = t._$gL, this._$B0 = t._$B0, this._$z0 = t._$z0, this._$qT = t._$qT, this.reflectX = t.reflectX, this.reflectY = t.reflectY - }, P.prototype._$F0 = function(t) { - this._$fL = t._$_T(), this._$gL = t._$_T(), this._$B0 = t._$_T(), this._$z0 = t._$_T(), this._$qT = t._$_T(), t.getFormatVersion() >= G.LIVE2D_FORMAT_VERSION_V2_10_SDK2 && (this.reflectX = t._$po(), this.reflectY = t._$po()) - }, P.prototype._$e = function() {}; - var It = function() {}; - It._$ni = function(t, i, e, r, o, n, s, _, a) { - var h = s * n - _ * o; - if (0 == h) return null; - var l, $ = ((t - e) * n - (i - r) * o) / h; - return l = 0 != o ? (t - e - $ * s) / o : (i - r - $ * _) / n, isNaN(l) && (l = (t - e - $ * s) / o, isNaN(l) && (l = (i - r - $ * _) / n), isNaN(l) && (console.log("a is NaN @UtVector#_$ni() "), console.log("v1x : " + o), console.log("v1x != 0 ? " + (0 != o)))), null == a ? new Array(l, $) : (a[0] = l, a[1] = $, a) - }, S.prototype._$8P = function() { - return this.x + .5 * this.width - }, S.prototype._$6P = function() { - return this.y + .5 * this.height - }, S.prototype._$EL = function() { - return this.x + this.width - }, S.prototype._$5T = function() { - return this.y + this.height - }, S.prototype._$jL = function(t, i, e, r) { - this.x = t, this.y = i, this.width = e, this.height = r - }, S.prototype._$jL = function(t) { - this.x = t.x, this.y = t.y, this.width = t.width, this.height = t.height - }, S.prototype.contains = function(t, i) { - return this.x <= this.x && this.y <= this.y && this.x <= this.x + this.width && this.y <= this.y + this.height - }, S.prototype.expand = function(t, i) { - this.x -= t, this.y -= i, this.width += 2 * t, this.height += 2 * i - }, v._$Z2 = function(t, i, e, r) { - var o = i._$Q2(t, e), - n = t._$vs(), - s = t._$Tr(); - if (i._$zr(n, s, o), o <= 0) return r[n[0]]; - if (1 == o) { - var _ = r[n[0]], - a = r[n[1]], - h = s[0]; - return _ + (a - _) * h | 0 - } - if (2 == o) { - var _ = r[n[0]], - a = r[n[1]], - l = r[n[2]], - $ = r[n[3]], - h = s[0], - u = s[1], - p = _ + (a - _) * h | 0, - f = l + ($ - l) * h | 0; - return p + (f - p) * u | 0 - } - if (3 == o) { - var c = r[n[0]], - d = r[n[1]], - g = r[n[2]], - y = r[n[3]], - m = r[n[4]], - T = r[n[5]], - P = r[n[6]], - S = r[n[7]], - h = s[0], - u = s[1], - v = s[2], - _ = c + (d - c) * h | 0, - a = g + (y - g) * h | 0, - l = m + (T - m) * h | 0, - $ = P + (S - P) * h | 0, - p = _ + (a - _) * u | 0, - f = l + ($ - l) * u | 0; - return p + (f - p) * v | 0 - } - if (4 == o) { - var L = r[n[0]], - M = r[n[1]], - E = r[n[2]], - A = r[n[3]], - I = r[n[4]], - w = r[n[5]], - x = r[n[6]], - O = r[n[7]], - D = r[n[8]], - R = r[n[9]], - b = r[n[10]], - F = r[n[11]], - C = r[n[12]], - N = r[n[13]], - B = r[n[14]], - U = r[n[15]], - h = s[0], - u = s[1], - v = s[2], - G = s[3], - c = L + (M - L) * h | 0, - d = E + (A - E) * h | 0, - g = I + (w - I) * h | 0, - y = x + (O - x) * h | 0, - m = D + (R - D) * h | 0, - T = b + (F - b) * h | 0, - P = C + 
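/*
 * It._$ni above solves a 2x2 linear system by Cramer's rule: for the
 * point difference (t-e, i-r) and direction vectors (o,n) and (s,_) it
 * returns coefficients (l, $) with t-e = l*o + $*s and i-r = l*n + $*_,
 * returning null when the determinant s*n - _*o vanishes (parallel
 * directions); the NaN fallbacks re-derive l from whichever component
 * avoided a near-zero divide. Incidentally, S.contains above compares
 * this.x/this.y against themselves instead of the (t, i) arguments, so
 * as minified it cannot reject any point -- a point-in-rect test would
 * presumably read this.x <= t && t <= this.x + this.width (and likewise
 * for y).
 */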
(N - C) * h | 0, - S = B + (U - B) * h | 0, - _ = c + (d - c) * u | 0, - a = g + (y - g) * u | 0, - l = m + (T - m) * u | 0, - $ = P + (S - P) * u | 0, - p = _ + (a - _) * v | 0, - f = l + ($ - l) * v | 0; - return p + (f - p) * G | 0 - } - for (var Y = 1 << o, k = new Float32Array(Y), V = 0; V < Y; V++) { - for (var X = V, z = 1, H = 0; H < o; H++) z *= X % 2 == 0 ? 1 - s[H] : s[H], X /= 2; - k[V] = z - } - for (var W = new Float32Array(Y), j = 0; j < Y; j++) W[j] = r[n[j]]; - for (var q = 0, j = 0; j < Y; j++) q += k[j] * W[j]; - return q + .5 | 0 - }, v._$br = function(t, i, e, r) { - var o = i._$Q2(t, e), - n = t._$vs(), - s = t._$Tr(); - if (i._$zr(n, s, o), o <= 0) return r[n[0]]; - if (1 == o) { - var _ = r[n[0]], - a = r[n[1]], - h = s[0]; - return _ + (a - _) * h - } - if (2 == o) { - var _ = r[n[0]], - a = r[n[1]], - l = r[n[2]], - $ = r[n[3]], - h = s[0], - u = s[1]; - return (1 - u) * (_ + (a - _) * h) + u * (l + ($ - l) * h) - } - if (3 == o) { - var p = r[n[0]], - f = r[n[1]], - c = r[n[2]], - d = r[n[3]], - g = r[n[4]], - y = r[n[5]], - m = r[n[6]], - T = r[n[7]], - h = s[0], - u = s[1], - P = s[2]; - return (1 - P) * ((1 - u) * (p + (f - p) * h) + u * (c + (d - c) * h)) + P * ((1 - u) * (g + (y - g) * h) + u * (m + (T - m) * h)) - } - if (4 == o) { - var S = r[n[0]], - v = r[n[1]], - L = r[n[2]], - M = r[n[3]], - E = r[n[4]], - A = r[n[5]], - I = r[n[6]], - w = r[n[7]], - x = r[n[8]], - O = r[n[9]], - D = r[n[10]], - R = r[n[11]], - b = r[n[12]], - F = r[n[13]], - C = r[n[14]], - N = r[n[15]], - h = s[0], - u = s[1], - P = s[2], - B = s[3]; - return (1 - B) * ((1 - P) * ((1 - u) * (S + (v - S) * h) + u * (L + (M - L) * h)) + P * ((1 - u) * (E + (A - E) * h) + u * (I + (w - I) * h))) + B * ((1 - P) * ((1 - u) * (x + (O - x) * h) + u * (D + (R - D) * h)) + P * ((1 - u) * (b + (F - b) * h) + u * (C + (N - C) * h))) - } - for (var U = 1 << o, G = new Float32Array(U), Y = 0; Y < U; Y++) { - for (var k = Y, V = 1, X = 0; X < o; X++) V *= k % 2 == 0 ? 
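/*
 * v._$Z2 / v._$br above are the same N-linear interpolation in integer
 * (the `| 0` truncations) and float flavours. n[] holds the 2^o corner
 * indices of the surrounding parameter-grid cell and s[] the fractional
 * position along each of the o axes; dimensions 1-4 are unrolled, and the
 * general path computes each corner's weight as the product over axes of
 * either s[axis] or (1 - s[axis]), selected by the corner's bit pattern.
 * E.g. for o = 2 (bilinear) the four weights are (1-sx)(1-sy), sx(1-sy),
 * (1-sx)sy and sx*sy, which sum to 1.
 */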
1 - s[X] : s[X], k /= 2; - G[Y] = V - } - for (var z = new Float32Array(U), H = 0; H < U; H++) z[H] = r[n[H]]; - for (var W = 0, H = 0; H < U; H++) W += G[H] * z[H]; - return W - }, v._$Vr = function(t, i, e, r, o, n, s, _) { - var a = i._$Q2(t, e), - h = t._$vs(), - l = t._$Tr(); - i._$zr(h, l, a); - var $ = 2 * r, - u = s; - if (a <= 0) { - var p = h[0], - f = o[p]; - if (2 == _ && 0 == s) w._$jT(f, 0, n, 0, $); - else for (var c = 0; c < $;) n[u] = f[c++], n[u + 1] = f[c++], u += _ - } else if (1 == a) for (var f = o[h[0]], d = o[h[1]], g = l[0], y = 1 - g, c = 0; c < $;) n[u] = f[c] * y + d[c] * g, ++c, n[u + 1] = f[c] * y + d[c] * g, ++c, u += _; - else if (2 == a) for (var f = o[h[0]], d = o[h[1]], m = o[h[2]], T = o[h[3]], g = l[0], P = l[1], y = 1 - g, S = 1 - P, v = S * y, L = S * g, M = P * y, E = P * g, c = 0; c < $;) n[u] = v * f[c] + L * d[c] + M * m[c] + E * T[c], ++c, n[u + 1] = v * f[c] + L * d[c] + M * m[c] + E * T[c], ++c, u += _; - else if (3 == a) for (var A = o[h[0]], I = o[h[1]], x = o[h[2]], O = o[h[3]], D = o[h[4]], R = o[h[5]], b = o[h[6]], F = o[h[7]], g = l[0], P = l[1], C = l[2], y = 1 - g, S = 1 - P, N = 1 - C, B = N * S * y, U = N * S * g, G = N * P * y, Y = N * P * g, k = C * S * y, V = C * S * g, X = C * P * y, z = C * P * g, c = 0; c < $;) n[u] = B * A[c] + U * I[c] + G * x[c] + Y * O[c] + k * D[c] + V * R[c] + X * b[c] + z * F[c], ++c, n[u + 1] = B * A[c] + U * I[c] + G * x[c] + Y * O[c] + k * D[c] + V * R[c] + X * b[c] + z * F[c], ++c, u += _; - else if (4 == a) for (var H = o[h[0]], W = o[h[1]], j = o[h[2]], q = o[h[3]], J = o[h[4]], Q = o[h[5]], Z = o[h[6]], K = o[h[7]], tt = o[h[8]], it = o[h[9]], et = o[h[10]], rt = o[h[11]], ot = o[h[12]], nt = o[h[13]], st = o[h[14]], _t = o[h[15]], g = l[0], P = l[1], C = l[2], at = l[3], y = 1 - g, S = 1 - P, N = 1 - C, ht = 1 - at, lt = ht * N * S * y, $t = ht * N * S * g, ut = ht * N * P * y, pt = ht * N * P * g, ft = ht * C * S * y, ct = ht * C * S * g, dt = ht * C * P * y, gt = ht * C * P * g, yt = at * N * S * y, mt = at * N * S * g, Tt = at * N * P * y, Pt = at * N * P * g, St = at * C * S * y, vt = at * C * S * g, Lt = at * C * P * y, Mt = at * C * P * g, c = 0; c < $;) n[u] = lt * H[c] + $t * W[c] + ut * j[c] + pt * q[c] + ft * J[c] + ct * Q[c] + dt * Z[c] + gt * K[c] + yt * tt[c] + mt * it[c] + Tt * et[c] + Pt * rt[c] + St * ot[c] + vt * nt[c] + Lt * st[c] + Mt * _t[c], ++c, n[u + 1] = lt * H[c] + $t * W[c] + ut * j[c] + pt * q[c] + ft * J[c] + ct * Q[c] + dt * Z[c] + gt * K[c] + yt * tt[c] + mt * it[c] + Tt * et[c] + Pt * rt[c] + St * ot[c] + vt * nt[c] + Lt * st[c] + Mt * _t[c], ++c, u += _; - else { - for (var Et = 1 << a, At = new Float32Array(Et), It = 0; It < Et; It++) { - for (var wt = It, xt = 1, Ot = 0; Ot < a; Ot++) xt *= wt % 2 == 0 ? 1 - l[Ot] : l[Ot], wt /= 2; - At[It] = xt - } - for (var Dt = new Float32Array(Et), Rt = 0; Rt < Et; Rt++) Dt[Rt] = o[h[Rt]]; - for (var c = 0; c < $;) { - for (var bt = 0, Ft = 0, Ct = c + 1, Rt = 0; Rt < Et; Rt++) bt += At[Rt] * Dt[Rt][c], Ft += At[Rt] * Dt[Rt][Ct]; - c += 2, n[u] = bt, n[u + 1] = Ft, u += _ - } - } - }, L.prototype._$HT = function(t, i) { - this.x = t, this.y = i - }, L.prototype._$HT = function(t) { - this.x = t.x, this.y = t.y - }, M._$ur = -2, M._$ES = 500, M._$wb = 2, M._$8S = 3, M._$52 = M._$ES, M._$R2 = M._$ES, M._$or = function() { - return M._$52 - }, M._$Pr = function() { - return M._$R2 - }, M.prototype.convertClipIDForV2_11 = function(t) { - var i = []; - return null == t ? null : 0 == t.length ? null : /,/.test(t) ? 
i = t.id.split(",") : (i.push(t.id), i) - }, M.prototype._$F0 = function(t) { - this._$gP = t._$nP(), this._$dr = t._$nP(), this._$GS = t._$nP(), this._$qb = t._$6L(), this._$Lb = t._$cS(), this._$mS = t._$Tb(), t.getFormatVersion() >= G._$T7 ? (this.clipID = t._$nP(), this.clipIDList = this.convertClipIDForV2_11(this.clipID)) : this.clipIDList = [], this._$MS(this._$Lb) - }, M.prototype.getClipIDList = function() { - return this.clipIDList - }, M.prototype.init = function(t) {}, M.prototype._$Nr = function(t, i) { - if (i._$IS[0] = !1, i._$Us = v._$Z2(t, this._$GS, i._$IS, this._$Lb), at._$Zs); - else if (i._$IS[0]) return; - i._$7s = v._$br(t, this._$GS, i._$IS, this._$mS) - }, M.prototype._$2b = function(t, i) {}, M.prototype.getDrawDataID = function() { - return this._$gP - }, M.prototype._$j2 = function(t) { - this._$gP = t - }, M.prototype.getOpacity = function(t, i) { - return i._$7s - }, M.prototype._$zS = function(t, i) { - return i._$Us - }, M.prototype._$MS = function(t) { - for (var i = t.length - 1; i >= 0; --i) { - var e = t[i]; - e < M._$52 ? M._$52 = e : e > M._$R2 && (M._$R2 = e) - } - }, M.prototype.getTargetBaseDataID = function() { - return this._$dr - }, M.prototype._$gs = function(t) { - this._$dr = t - }, M.prototype._$32 = function() { - return null != this._$dr && this._$dr != yt._$2o() - }, M.prototype.preDraw = function(t, i, e) {}, M.prototype.draw = function(t, i, e) {}, M.prototype.getType = function() {}, M.prototype._$B2 = function(t, i, e) {}, E._$ps = 32, E.CLIPPING_PROCESS_NONE = 0, E.CLIPPING_PROCESS_OVERWRITE_ALPHA = 1, E.CLIPPING_PROCESS_MULTIPLY_ALPHA = 2, E.CLIPPING_PROCESS_DRAW = 3, E.CLIPPING_PROCESS_CLEAR_ALPHA = 4, E.prototype.setChannelFlagAsColor = function(t, i) { - this.CHANNEL_COLORS[t] = i - }, E.prototype.getChannelFlagAsColor = function(t) { - return this.CHANNEL_COLORS[t] - }, E.prototype._$ZT = function() {}, E.prototype._$Uo = function(t, i, e, r, o, n, s) {}, E.prototype._$Rs = function() { - return -1 - }, E.prototype._$Ds = function(t) {}, E.prototype.setBaseColor = function(t, i, e, r) { - t < 0 ? t = 0 : t > 1 && (t = 1), i < 0 ? i = 0 : i > 1 && (i = 1), e < 0 ? e = 0 : e > 1 && (e = 1), r < 0 ? 
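/*
 * M above is the draw-data base class: _$F0 reads the draw ID, target
 * deformer ID, pivot manager, and the keyed draw-order (_$Lb) and opacity
 * (_$mS) tables from the stream -- plus clip IDs from format V2.11 on,
 * where convertClipIDForV2_11 splits a comma-separated list. _$Nr then
 * evaluates draw order and opacity for the current parameters through the
 * v interpolators, and _$MS tracks the smallest/largest draw-order key
 * ever seen so the model context can size its order-slot table. The
 * CLIPPING_PROCESS_* constants on E enumerate the mask passes (overwrite
 * alpha, multiply alpha, draw, clear).
 */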
r = 0 : r > 1 && (r = 1), this._$lT = t, this._$C0 = i, this._$tT = e, this._$WL = r - }, E.prototype._$WP = function(t) { - this.culling = t - }, E.prototype.setMatrix = function(t) { - for (var i = 0; i < 16; i++) this.matrix4x4[i] = t[i] - }, E.prototype._$IT = function() { - return this.matrix4x4 - }, E.prototype.setPremultipliedAlpha = function(t) { - this.premultipliedAlpha = t - }, E.prototype.isPremultipliedAlpha = function() { - return this.premultipliedAlpha - }, E.prototype.setAnisotropy = function(t) { - this.anisotropy = t - }, E.prototype.getAnisotropy = function() { - return this.anisotropy - }, E.prototype.getClippingProcess = function() { - return this.clippingProcess - }, E.prototype.setClippingProcess = function(t) { - this.clippingProcess = t - }, E.prototype.setClipBufPre_clipContextForMask = function(t) { - this.clipBufPre_clipContextMask = t - }, E.prototype.getClipBufPre_clipContextMask = function() { - return this.clipBufPre_clipContextMask - }, E.prototype.setClipBufPre_clipContextForDraw = function(t) { - this.clipBufPre_clipContextDraw = t - }, E.prototype.getClipBufPre_clipContextDraw = function() { - return this.clipBufPre_clipContextDraw - }, I._$ur = -2, I._$c2 = 1, I._$_b = 2, I.prototype._$F0 = function(t) { - this._$kP = t._$nP(), this._$dr = t._$nP() - }, I.prototype.readV2_opacity = function(t) { - t.getFormatVersion() >= G.LIVE2D_FORMAT_VERSION_V2_10_SDK2 && (this._$mS = t._$Tb()) - }, I.prototype.init = function(t) {}, I.prototype._$Nr = function(t, i) {}, I.prototype.interpolateOpacity = function(t, i, e, r) { - null == this._$mS ? e.setInterpolatedOpacity(1) : e.setInterpolatedOpacity(v._$br(t, i, r, this._$mS)) - }, I.prototype._$2b = function(t, i) {}, I.prototype._$nb = function(t, i, e, r, o, n, s) {}, I.prototype.getType = function() {}, I.prototype._$gs = function(t) { - this._$dr = t - }, I.prototype._$a2 = function(t) { - this._$kP = t - }, I.prototype.getTargetBaseDataID = function() { - return this._$dr - }, I.prototype.getBaseDataID = function() { - return this._$kP - }, I.prototype._$32 = function() { - return null != this._$dr && this._$dr != yt._$2o() - }, w._$W2 = 0, w._$CS = w._$W2, w._$Mo = function() { - return !0 - }, w._$XP = function(t) { - try { - for (var i = getTimeMSec(); getTimeMSec() - i < t;); - } catch (t) { - t._$Rb() - } - }, w.getUserTimeMSec = function() { - return w._$CS == w._$W2 ? 
w.getSystemTimeMSec() : w._$CS - }, w.setUserTimeMSec = function(t) { - w._$CS = t - }, w.updateUserTimeMSec = function() { - return w._$CS = w.getSystemTimeMSec() - }, w.getTimeMSec = function() { - return (new Date).getTime() - }, w.getSystemTimeMSec = function() { - return (new Date).getTime() - }, w._$Q = function(t) {}, w._$jT = function(t, i, e, r, o) { - for (var n = 0; n < o; n++) e[r + n] = t[i + n] - }, x._$ds = -2, x.prototype._$F0 = function(t) { - this._$wL = t._$nP(), this._$VP = t._$6L(), this._$GP = t._$nP() - }, x.prototype.getParamIndex = function(t) { - return this._$2r != t && (this._$8o = x._$ds), this._$8o - }, x.prototype._$Pb = function(t, i) { - this._$8o = t, this._$2r = i - }, x.prototype.getParamID = function() { - return this._$wL - }, x.prototype._$yP = function(t) { - this._$wL = t - }, x.prototype._$N2 = function() { - return this._$VP - }, x.prototype._$d2 = function() { - return this._$GP - }, x.prototype._$t2 = function(t, i) { - this._$VP = t, this._$GP = i - }, x.prototype._$Lr = function() { - return this._$O2 - }, x.prototype._$wr = function(t) { - this._$O2 = t - }, x.prototype._$SL = function() { - return this._$ri - }, x.prototype._$AL = function(t) { - this._$ri = t - }, O.startsWith = function(t, i, e) { - var r = i + e.length; - if (r >= t.length) return !1; - for (var o = i; o < r; o++) if (O.getChar(t, o) != e.charAt(o - i)) return !1; - return !0 - }, O.getChar = function(t, i) { - return String.fromCharCode(t.getUint8(i)) - }, O.createString = function(t, i, e) { - for (var r = new ArrayBuffer(2 * e), o = new Uint16Array(r), n = 0; n < e; n++) o[n] = t.getUint8(i + n); - return String.fromCharCode.apply(null, o) - }, O._$LS = function(t, i, e, r) { - t instanceof ArrayBuffer && (t = new DataView(t)); - var o = e, - n = !1, - s = !1, - _ = 0, - a = O.getChar(t, o); - "-" == a && (n = !0, o++); - for (var h = !1; o < i; o++) { - switch (a = O.getChar(t, o)) { - case "0": - _ *= 10; - break; - case "1": - _ = 10 * _ + 1; - break; - case "2": - _ = 10 * _ + 2; - break; - case "3": - _ = 10 * _ + 3; - break; - case "4": - _ = 10 * _ + 4; - break; - case "5": - _ = 10 * _ + 5; - break; - case "6": - _ = 10 * _ + 6; - break; - case "7": - _ = 10 * _ + 7; - break; - case "8": - _ = 10 * _ + 8; - break; - case "9": - _ = 10 * _ + 9; - break; - case ".": - s = !0, o++, h = !0; - break; - default: - h = !0 - } - if (h) break - } - if (s) for (var l = .1, $ = !1; o < i; o++) { - switch (a = O.getChar(t, o)) { - case "0": - break; - case "1": - _ += 1 * l; - break; - case "2": - _ += 2 * l; - break; - case "3": - _ += 3 * l; - break; - case "4": - _ += 4 * l; - break; - case "5": - _ += 5 * l; - break; - case "6": - _ += 6 * l; - break; - case "7": - _ += 7 * l; - break; - case "8": - _ += 8 * l; - break; - case "9": - _ += 9 * l; - break; - default: - $ = !0 - } - if (l *= .1, $) break - } - return n && (_ = -_), r[0] = o, _ - }, D.prototype._$zP = function() { - this._$Ob = new Array - }, D.prototype._$F0 = function(t) { - this._$Ob = t._$nP() - }, D.prototype._$Ur = function(t) { - if (t._$WS()) return !0; - for (var i = t._$v2(), e = this._$Ob.length - 1; e >= 0; --e) { - var r = this._$Ob[e].getParamIndex(i); - if (r == x._$ds && (r = t.getParamIndex(this._$Ob[e].getParamID())), t._$Xb(r)) return !0 - } - return !1 - }, D.prototype._$Q2 = function(t, i) { - for (var e, r, o = this._$Ob.length, n = t._$v2(), s = 0, _ = 0; _ < o; _++) { - var a = this._$Ob[_]; - if (e = a.getParamIndex(n), e == x._$ds && (e = t.getParamIndex(a.getParamID()), a._$Pb(e, 
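/*
 * O._$LS above is a hand-rolled ASCII number parser over a DataView: an
 * optional leading "-", integer digits accumulated as v = 10*v + d, then
 * after a "." fractional digits accumulated with a decaying factor 0.1,
 * 0.01, ... It stops at the first non-numeric character, stores the
 * position it stopped at into r[0], and returns the signed value -- i.e.
 * it pulls "12.5" out of a motion file in place without allocating an
 * intermediate string. No exponent notation is handled, which appears to
 * be fine for .mtn data.
 */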
n)), e < 0) throw new Exception("err 23242 : " + a.getParamID()); - var h = e < 0 ? 0 : t.getParamFloat(e); - r = a._$N2(); - var l, $, u = a._$d2(), - p = -1, - f = 0; - if (r < 1); - else if (1 == r) l = u[0], l - U._$J < h && h < l + U._$J ? (p = 0, f = 0) : (p = 0, i[0] = !0); - else if (l = u[0], h < l - U._$J) p = 0, i[0] = !0; - else if (h < l + U._$J) p = 0; - else { - for (var c = !1, d = 1; d < r; ++d) { - if ($ = u[d], h < $ + U._$J) { - $ - U._$J < h ? p = d : (p = d - 1, f = (h - l) / ($ - l), s++), c = !0; - break - } - l = $ - } - c || (p = r - 1, f = 0, i[0] = !0) - } - a._$wr(p), a._$AL(f) - } - return s - }, D.prototype._$zr = function(t, i, e) { - var r = 1 << e; - r + 1 > U._$Qb && console.log("err 23245\n"); - for (var o = this._$Ob.length, n = 1, s = 1, _ = 0, a = 0; a < r; ++a) t[a] = 0; - for (var h = 0; h < o; ++h) { - var l = this._$Ob[h]; - if (0 == l._$SL()) { - var $ = l._$Lr() * n; - if ($ < 0 && at._$3T) throw new Exception("err 23246"); - for (var a = 0; a < r; ++a) t[a] += $ - } else { - for (var $ = n * l._$Lr(), u = n * (l._$Lr() + 1), a = 0; a < r; ++a) t[a] += (a / s | 0) % 2 == 0 ? $ : u; - i[_++] = l._$SL(), s *= 2 - } - n *= l._$N2() - } - t[r] = 65535, i[_] = -1 - }, D.prototype._$h2 = function(t, i, e) { - for (var r = new Float32Array(i), o = 0; o < i; ++o) r[o] = e[o]; - var n = new x; - n._$yP(t), n._$t2(i, r), this._$Ob.push(n) - }, D.prototype._$J2 = function(t) { - for (var i = t, e = this._$Ob.length, r = 0; r < e; ++r) { - var o = this._$Ob[r], - n = o._$N2(), - s = i % o._$N2(), - _ = o._$d2()[s]; - console.log("%s[%d]=%7.2f / ", o.getParamID(), s, _), i /= n - } - console.log("\n") - }, D.prototype.getParamCount = function() { - return this._$Ob.length - }, D.prototype._$zs = function() { - return this._$Ob - }, R.prototype.identity = function() { - for (var t = 0; t < 16; t++) this.m[t] = t % 5 == 0 ? 1 : 0 - }, R.prototype.getArray = function() { - return this.m - }, R.prototype.getCopyMatrix = function() { - return new Float32Array(this.m) - }, R.prototype.setMatrix = function(t) { - if (null != t && 16 == t.length) for (var i = 0; i < 16; i++) this.m[i] = t[i] - }, R.prototype.mult = function(t, i, e) { - return null == i ? null : (this == i ? this.mult_safe(this.m, t.m, i.m, e) : this.mult_fast(this.m, t.m, i.m, e), i) - }, R.prototype.mult_safe = function(t, i, e, r) { - if (t == e) { - var o = new Array(16); - this.mult_fast(t, i, o, r); - for (var n = 15; n >= 0; --n) e[n] = o[n] - } else this.mult_fast(t, i, e, r) - }, R.prototype.mult_fast = function(t, i, e, r) { - r ? 
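/*
 * D above is the pivot manager feeding the interpolators: _$Q2 locates,
 * for every parameter, the key index p and fraction f with
 * value ~ key[p] + f * (key[p+1] - key[p]), snapping to a key when within
 * the U._$J epsilon and clamping (plus setting the out-of-range flag in
 * i[0]) beyond the ends; its return value is the number of parameters
 * that landed strictly between keys, i.e. the dimensionality o that
 * v._$Z2/_$br switch on. _$zr then expands the stored (index, fraction)
 * pairs into the 2^o corner offsets and per-axis fraction arrays those
 * routines consume.
 */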
(e[0] = t[0] * i[0] + t[4] * i[1] + t[8] * i[2], e[4] = t[0] * i[4] + t[4] * i[5] + t[8] * i[6], e[8] = t[0] * i[8] + t[4] * i[9] + t[8] * i[10], e[12] = t[0] * i[12] + t[4] * i[13] + t[8] * i[14] + t[12], e[1] = t[1] * i[0] + t[5] * i[1] + t[9] * i[2], e[5] = t[1] * i[4] + t[5] * i[5] + t[9] * i[6], e[9] = t[1] * i[8] + t[5] * i[9] + t[9] * i[10], e[13] = t[1] * i[12] + t[5] * i[13] + t[9] * i[14] + t[13], e[2] = t[2] * i[0] + t[6] * i[1] + t[10] * i[2], e[6] = t[2] * i[4] + t[6] * i[5] + t[10] * i[6], e[10] = t[2] * i[8] + t[6] * i[9] + t[10] * i[10], e[14] = t[2] * i[12] + t[6] * i[13] + t[10] * i[14] + t[14], e[3] = e[7] = e[11] = 0, e[15] = 1) : (e[0] = t[0] * i[0] + t[4] * i[1] + t[8] * i[2] + t[12] * i[3], e[4] = t[0] * i[4] + t[4] * i[5] + t[8] * i[6] + t[12] * i[7], e[8] = t[0] * i[8] + t[4] * i[9] + t[8] * i[10] + t[12] * i[11], e[12] = t[0] * i[12] + t[4] * i[13] + t[8] * i[14] + t[12] * i[15], e[1] = t[1] * i[0] + t[5] * i[1] + t[9] * i[2] + t[13] * i[3], e[5] = t[1] * i[4] + t[5] * i[5] + t[9] * i[6] + t[13] * i[7], e[9] = t[1] * i[8] + t[5] * i[9] + t[9] * i[10] + t[13] * i[11], e[13] = t[1] * i[12] + t[5] * i[13] + t[9] * i[14] + t[13] * i[15], e[2] = t[2] * i[0] + t[6] * i[1] + t[10] * i[2] + t[14] * i[3], e[6] = t[2] * i[4] + t[6] * i[5] + t[10] * i[6] + t[14] * i[7], e[10] = t[2] * i[8] + t[6] * i[9] + t[10] * i[10] + t[14] * i[11], e[14] = t[2] * i[12] + t[6] * i[13] + t[10] * i[14] + t[14] * i[15], e[3] = t[3] * i[0] + t[7] * i[1] + t[11] * i[2] + t[15] * i[3], e[7] = t[3] * i[4] + t[7] * i[5] + t[11] * i[6] + t[15] * i[7], e[11] = t[3] * i[8] + t[7] * i[9] + t[11] * i[10] + t[15] * i[11], e[15] = t[3] * i[12] + t[7] * i[13] + t[11] * i[14] + t[15] * i[15]) - }, R.prototype.translate = function(t, i, e) { - this.m[12] = this.m[0] * t + this.m[4] * i + this.m[8] * e + this.m[12], this.m[13] = this.m[1] * t + this.m[5] * i + this.m[9] * e + this.m[13], this.m[14] = this.m[2] * t + this.m[6] * i + this.m[10] * e + this.m[14], this.m[15] = this.m[3] * t + this.m[7] * i + this.m[11] * e + this.m[15] - }, R.prototype.scale = function(t, i, e) { - this.m[0] *= t, this.m[4] *= i, this.m[8] *= e, this.m[1] *= t, this.m[5] *= i, this.m[9] *= e, this.m[2] *= t, this.m[6] *= i, this.m[10] *= e, this.m[3] *= t, this.m[7] *= i, this.m[11] *= e - }, R.prototype.rotateX = function(t) { - var i = Lt.fcos(t), - e = Lt._$9(t), - r = this.m[4]; - this.m[4] = r * i + this.m[8] * e, this.m[8] = r * -e + this.m[8] * i, r = this.m[5], this.m[5] = r * i + this.m[9] * e, this.m[9] = r * -e + this.m[9] * i, r = this.m[6], this.m[6] = r * i + this.m[10] * e, this.m[10] = r * -e + this.m[10] * i, r = this.m[7], this.m[7] = r * i + this.m[11] * e, this.m[11] = r * -e + this.m[11] * i - }, R.prototype.rotateY = function(t) { - var i = Lt.fcos(t), - e = Lt._$9(t), - r = this.m[0]; - this.m[0] = r * i + this.m[8] * -e, this.m[8] = r * e + this.m[8] * i, r = this.m[1], this.m[1] = r * i + this.m[9] * -e, this.m[9] = r * e + this.m[9] * i, r = m[2], this.m[2] = r * i + this.m[10] * -e, this.m[10] = r * e + this.m[10] * i, r = m[3], this.m[3] = r * i + this.m[11] * -e, this.m[11] = r * e + this.m[11] * i - }, R.prototype.rotateZ = function(t) { - var i = Lt.fcos(t), - e = Lt._$9(t), - r = this.m[0]; - this.m[0] = r * i + this.m[4] * e, this.m[4] = r * -e + this.m[4] * i, r = this.m[1], this.m[1] = r * i + this.m[5] * e, this.m[5] = r * -e + this.m[5] * i, r = this.m[2], this.m[2] = r * i + this.m[6] * e, this.m[6] = r * -e + this.m[6] * i, r = this.m[3], this.m[3] = r * i + this.m[7] * e, this.m[7] = r * 
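/*
 * R above is a 4x4 matrix in OpenGL column-major layout: m[0..3] is the
 * first column and m[12..14] the translation. mult_fast's flag selects an
 * affine fast path that assumes a (0,0,0,1) bottom row; mult_safe routes
 * through a temporary when the output array aliases an input. translate()
 * post-multiplies by a translation, e.g. m[12] becomes
 * m[0]*tx + m[4]*ty + m[8]*tz + m[12]. One oddity: rotateY reads bare
 * m[2] and m[3] where rotateX/rotateZ read this.m[...], which in this
 * minified build resolves to an undefined global -- rotateY looks
 * unusable as shipped, though nothing here appears to call it.
 */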
-e + this.m[7] * i - }, b.prototype = new et, b._$tP = new Object, b._$27 = function() { - b._$tP.clear() - }, b.getID = function(t) { - var i = b._$tP[t]; - return null == i && (i = new b(t), b._$tP[t] = i), i - }, b.prototype._$3s = function() { - return new b - }, F._$kS = -1, F._$pS = 0, F._$hb = 1, F.STATE_IDENTITY = 0, F._$gb = 1, F._$fo = 2, F._$go = 4, F.prototype.transform = function(t, i, e) { - var r, o, n, s, _, a, h = 0, - l = 0; - switch (this._$hi) { - default: - return; - case F._$go | F._$fo | F._$gb: - for (r = this._$7, o = this._$H, n = this._$k, s = this._$f, _ = this._$g, a = this._$w; --e >= 0;) { - var $ = t[h++], - u = t[h++]; - i[l++] = r * $ + o * u + n, i[l++] = s * $ + _ * u + a - } - return; - case F._$go | F._$fo: - for (r = this._$7, o = this._$H, s = this._$f, _ = this._$g; --e >= 0;) { - var $ = t[h++], - u = t[h++]; - i[l++] = r * $ + o * u, i[l++] = s * $ + _ * u - } - return; - case F._$go | F._$gb: - for (o = this._$H, n = this._$k, s = this._$f, a = this._$w; --e >= 0;) { - var $ = t[h++]; - i[l++] = o * t[h++] + n, i[l++] = s * $ + a - } - return; - case F._$go: - for (o = this._$H, s = this._$f; --e >= 0;) { - var $ = t[h++]; - i[l++] = o * t[h++], i[l++] = s * $ - } - return; - case F._$fo | F._$gb: - for (r = this._$7, n = this._$k, _ = this._$g, a = this._$w; --e >= 0;) i[l++] = r * t[h++] + n, i[l++] = _ * t[h++] + a; - return; - case F._$fo: - for (r = this._$7, _ = this._$g; --e >= 0;) i[l++] = r * t[h++], i[l++] = _ * t[h++]; - return; - case F._$gb: - for (n = this._$k, a = this._$w; --e >= 0;) i[l++] = t[h++] + n, i[l++] = t[h++] + a; - return; - case F.STATE_IDENTITY: - return void(t == i && h == l || w._$jT(t, h, i, l, 2 * e)) - } - }, F.prototype.update = function() { - 0 == this._$H && 0 == this._$f ? 1 == this._$7 && 1 == this._$g ? 0 == this._$k && 0 == this._$w ? (this._$hi = F.STATE_IDENTITY, this._$Z = F._$pS) : (this._$hi = F._$gb, this._$Z = F._$hb) : 0 == this._$k && 0 == this._$w ? (this._$hi = F._$fo, this._$Z = F._$kS) : (this._$hi = F._$fo | F._$gb, this._$Z = F._$kS) : 0 == this._$7 && 0 == this._$g ? 0 == this._$k && 0 == this._$w ? (this._$hi = F._$go, this._$Z = F._$kS) : (this._$hi = F._$go | F._$gb, this._$Z = F._$kS) : 0 == this._$k && 0 == this._$w ? (this._$hi = F._$go | F._$fo, this._$Z = F._$kS) : (this._$hi = F._$go | F._$fo | F._$gb, this._$Z = F._$kS) - }, F.prototype._$RT = function(t) { - this._$IT(t); - var i = t[0], - e = t[2], - r = t[1], - o = t[3], - n = Math.sqrt(i * i + r * r), - s = i * o - e * r; - 0 == n ? 
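/*
 * F above is a 2x3 affine transform (scale/shear terms _$7/_$H/_$f/_$g,
 * translation _$k/_$w) whose update() classifies the matrix into a
 * bitmask -- _$go/_$fo/_$gb appear to mean "has off-diagonal terms",
 * "has diagonal scale" and "has translation" -- so transform() can
 * dispatch to the cheapest loop: identity becomes a copy (w._$jT), pure
 * translation two adds per point, and so on up to the full 6-term case.
 * _$RT/_$CT decompose to and rebuild from (scaleX, det/scaleX, shear,
 * angle, tx, ty), which is what _$ho uses to interpolate between two
 * transforms without shearing artifacts.
 */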
at._$so && console.log("affine._$RT() / rt==0") : (t[0] = n, t[1] = s / n, t[2] = (r * o + i * e) / s, t[3] = Math.atan2(r, i)) - }, F.prototype._$ho = function(t, i, e, r) { - var o = new Float32Array(6), - n = new Float32Array(6); - t._$RT(o), i._$RT(n); - var s = new Float32Array(6); - s[0] = o[0] + (n[0] - o[0]) * e, s[1] = o[1] + (n[1] - o[1]) * e, s[2] = o[2] + (n[2] - o[2]) * e, s[3] = o[3] + (n[3] - o[3]) * e, s[4] = o[4] + (n[4] - o[4]) * e, s[5] = o[5] + (n[5] - o[5]) * e, r._$CT(s) - }, F.prototype._$CT = function(t) { - var i = Math.cos(t[3]), - e = Math.sin(t[3]); - this._$7 = t[0] * i, this._$f = t[0] * e, this._$H = t[1] * (t[2] * i - e), this._$g = t[1] * (t[2] * e + i), this._$k = t[4], this._$w = t[5], this.update() - }, F.prototype._$IT = function(t) { - t[0] = this._$7, t[1] = this._$f, t[2] = this._$H, t[3] = this._$g, t[4] = this._$k, t[5] = this._$w - }, C.prototype = new s, C._$cs = "VISIBLE:", C._$ar = "LAYOUT:", C._$Co = 0, C._$D2 = [], C._$1T = 1, C.loadMotion = function(t) { - var i = new C, - e = [0], - r = t.length; - i._$yT = 0; - for (var o = 0; o < r; ++o) { - var n = 255 & t[o]; - if ("\n" != n && "\r" != n) if ("#" != n) if ("$" != n) { - if ("a" <= n && n <= "z" || "A" <= n && n <= "Z" || "_" == n) { - for (var s = o, _ = -1; o < r && ("\r" != (n = 255 & t[o]) && "\n" != n); ++o) if ("=" == n) { - _ = o; - break - } - if (_ >= 0) { - var a = new B; - O.startsWith(t, s, C._$cs) ? (a._$RP = B._$hs, a._$4P = new String(t, s, _ - s)) : O.startsWith(t, s, C._$ar) ? (a._$4P = new String(t, s + 7, _ - s - 7), O.startsWith(t, s + 7, "ANCHOR_X") ? a._$RP = B._$xs : O.startsWith(t, s + 7, "ANCHOR_Y") ? a._$RP = B._$us : O.startsWith(t, s + 7, "SCALE_X") ? a._$RP = B._$qs : O.startsWith(t, s + 7, "SCALE_Y") ? a._$RP = B._$Ys : O.startsWith(t, s + 7, "X") ? a._$RP = B._$ws : O.startsWith(t, s + 7, "Y") && (a._$RP = B._$Ns)) : (a._$RP = B._$Fr, a._$4P = new String(t, s, _ - s)), i.motions.push(a); - var h = 0; - for (C._$D2.clear(), o = _ + 1; o < r && ("\r" != (n = 255 & t[o]) && "\n" != n); ++o) if ("," != n && " " != n && "\t" != n) { - var l = O._$LS(t, r, o, e); - if (e[0] > 0) { - C._$D2.push(l), h++; - var $ = e[0]; - if ($ < o) { - console.log("_$n0 _$hi . @Live2DMotion loadMotion()\n"); - break - } - o = $ - } - } - a._$I0 = C._$D2._$BL(), h > i._$yT && (i._$yT = h) - } - } - } else { - for (var s = o, _ = -1; o < r && ("\r" != (n = 255 & t[o]) && "\n" != n); ++o) if ("=" == n) { - _ = o; - break - } - var u = !1; - if (_ >= 0) for (_ == s + 4 && "f" == t[s + 1] && "p" == t[s + 2] && "s" == t[s + 3] && (u = !0), o = _ + 1; o < r && ("\r" != (n = 255 & t[o]) && "\n" != n); ++o) if ("," != n && " " != n && "\t" != n) { - var l = O._$LS(t, r, o, e); - e[0] > 0 && u && 5 < l && l < 121 && (i._$D0 = l), o = e[0] - } - for (; o < r && ("\n" != t[o] && "\r" != t[o]); ++o); - } else for (; o < r && ("\n" != t[o] && "\r" != t[o]); ++o); - } - return i._$AS = 1e3 * i._$yT / i._$D0 | 0, i - }, C.prototype.getDurationMSec = function() { - return this._$AS - }, C.prototype.dump = function() { - for (var t = 0; t < this.motions.length; t++) { - var i = this.motions[t]; - console.log("_$wL[%s] [%d]. 
", i._$4P, i._$I0.length); - for (var e = 0; e < i._$I0.length && e < 10; e++) console.log("%5.2f ,", i._$I0[e]); - console.log("\n") - } - }, C.prototype.updateParamExe = function(t, i, e, r) { - for (var o = i - r._$z2, n = o * this._$D0 / 1e3, s = 0 | n, _ = n - s, a = 0; a < this.motions.length; a++) { - var h = this.motions[a], - l = h._$I0.length, - $ = h._$4P; - if (h._$RP == B._$hs) { - var u = h._$I0[s >= l ? l - 1 : s]; - t.setParamFloat($, u) - } else if (B._$ws <= h._$RP && h._$RP <= B._$Ys); - else { - var p = t.getParamFloat($), - f = h._$I0[s >= l ? l - 1 : s], - c = h._$I0[s + 1 >= l ? l - 1 : s + 1], - d = f + (c - f) * _, - g = p + (d - p) * e; - t.setParamFloat($, g) - } - } - s >= this._$yT && (this._$E ? (r._$z2 = i, this.loopFadeIn && (r._$bs = i)) : r._$9L = !0) - }, C.prototype._$r0 = function() { - return this._$E - }, C.prototype._$aL = function(t) { - this._$E = t - }, C.prototype.isLoopFadeIn = function() { - return this.loopFadeIn - }, C.prototype.setLoopFadeIn = function(t) { - this.loopFadeIn = t - }, N.prototype.clear = function() { - this.size = 0 - }, N.prototype.add = function(t) { - if (this._$P.length <= this.size) { - var i = new Float32Array(2 * this.size); - w._$jT(this._$P, 0, i, 0, this.size), this._$P = i - } - this._$P[this.size++] = t - }, N.prototype._$BL = function() { - var t = new Float32Array(this.size); - return w._$jT(this._$P, 0, t, 0, this.size), t - }, B._$Fr = 0, B._$hs = 1, B._$ws = 100, B._$Ns = 101, B._$xs = 102, B._$us = 103, B._$qs = 104, B._$Ys = 105, U._$Ms = 1, U._$Qs = 2, U._$i2 = 0, U._$No = 2, U._$do = U._$Ms, U._$Ls = !0, U._$1r = 5, U._$Qb = 65, U._$J = 1e-4, U._$FT = .001, U._$Ss = 3, G._$o7 = 6, G._$S7 = 7, G._$s7 = 8, G._$77 = 9, G.LIVE2D_FORMAT_VERSION_V2_10_SDK2 = 10, G.LIVE2D_FORMAT_VERSION_V2_11_SDK2_1 = 11, G._$T7 = G.LIVE2D_FORMAT_VERSION_V2_11_SDK2_1, G._$Is = -2004318072, G._$h0 = 0, G._$4L = 23, G._$7P = 33, G._$uT = function(t) { - console.log("_$bo :: _$6 _$mo _$E0 : %d\n", t) - }, G._$9o = function(t) { - if (t < 40) return G._$uT(t), null; - if (t < 50) return G._$uT(t), null; - if (t < 60) return G._$uT(t), null; - if (t < 100) switch (t) { - case 65: - return new Z; - case 66: - return new D; - case 67: - return new x; - case 68: - return new z; - case 69: - return new P; - case 70: - return new $t; - default: - return G._$uT(t), null - } else if (t < 150) switch (t) { - case 131: - return new st; - case 133: - return new tt; - case 136: - return new p; - case 137: - return new ot; - case 142: - return new j - } - return G._$uT(t), null - }, Y._$HP = 0, Y._$_0 = !0; - Y._$V2 = -1, Y._$W0 = -1, Y._$jr = !1, Y._$ZS = !0, Y._$tr = -1e6, Y._$lr = 1e6, Y._$is = 32, Y._$e = !1, Y.prototype.getDrawDataIndex = function(t) { - for (var i = this._$aS.length - 1; i >= 0; --i) if (null != this._$aS[i] && this._$aS[i].getDrawDataID() == t) return i; - return -1 - }, Y.prototype.getDrawData = function(t) { - if (t instanceof b) { - if (null == this._$Bo) { - this._$Bo = new Object; - for (var i = this._$aS.length, e = 0; e < i; e++) { - var r = this._$aS[e], - o = r.getDrawDataID(); - null != o && (this._$Bo[o] = r) - } - } - return this._$Bo[id] - } - return t < this._$aS.length ? 
this._$aS[t] : null - }, Y.prototype.release = function() { - this._$3S.clear(), this._$aS.clear(), this._$F2.clear(), null != this._$Bo && this._$Bo.clear(), this._$db.clear(), this._$8b.clear(), this._$Hr.clear() - }, Y.prototype.init = function() { - this._$co++, this._$F2.length > 0 && this.release(); - for (var t = this._$Ri.getModelImpl(), i = t._$Xr(), r = i.length, o = new Array, n = new Array, s = 0; s < r; ++s) { - var _ = i[s]; - this._$F2.push(_), this._$Hr.push(_.init(this)); - for (var a = _.getBaseData(), h = a.length, l = 0; l < h; ++l) o.push(a[l]); - for (var l = 0; l < h; ++l) { - var $ = a[l].init(this); - $._$l2(s), n.push($) - } - for (var u = _.getDrawData(), p = u.length, l = 0; l < p; ++l) { - var f = u[l], - c = f.init(this); - c._$IP = s, this._$aS.push(f), this._$8b.push(c) - } - } - for (var d = o.length, g = yt._$2o();;) { - for (var y = !1, s = 0; s < d; ++s) { - var m = o[s]; - if (null != m) { - var T = m.getTargetBaseDataID(); - (null == T || T == g || this.getBaseDataIndex(T) >= 0) && (this._$3S.push(m), this._$db.push(n[s]), o[s] = null, y = !0) - } - } - if (!y) break - } - var P = t._$E2(); - if (null != P) { - var S = P._$1s(); - if (null != S) for (var v = S.length, s = 0; s < v; ++s) { - var L = S[s]; - null != L && this._$02(L.getParamID(), L.getDefaultValue(), L.getMinValue(), L.getMaxValue()) - } - } - this.clipManager = new e(this.dp_webgl), this.clipManager.init(this, this._$aS, this._$8b), this._$QT = !0 - }, Y.prototype.update = function() { - Y._$e && _.start("_$zL"); - for (var t = this._$_2.length, i = 0; i < t; i++) this._$_2[i] != this._$vr[i] && (this._$Js[i] = Y._$ZS, this._$vr[i] = this._$_2[i]); - var e = this._$3S.length, - r = this._$aS.length, - o = W._$or(), - n = W._$Pr(), - s = n - o + 1; - (null == this._$Ws || this._$Ws.length < s) && (this._$Ws = new Int16Array(s), this._$Vs = new Int16Array(s)); - for (var i = 0; i < s; i++) this._$Ws[i] = Y._$V2, this._$Vs[i] = Y._$V2; - (null == this._$Er || this._$Er.length < r) && (this._$Er = new Int16Array(r)); - for (var i = 0; i < r; i++) this._$Er[i] = Y._$W0; - Y._$e && _.dump("_$zL"), Y._$e && _.start("_$UL"); - for (var a = null, h = 0; h < e; ++h) { - var l = this._$3S[h], - $ = this._$db[h]; - try { - l._$Nr(this, $), l._$2b(this, $) - } catch (t) { - null == a && (a = t) - } - } - null != a && Y._$_0 && _._$Rb(a), Y._$e && _.dump("_$UL"), Y._$e && _.start("_$DL"); - for (var u = null, p = 0; p < r; ++p) { - var f = this._$aS[p], - c = this._$8b[p]; - try { - if (f._$Nr(this, c), c._$u2()) continue; - f._$2b(this, c); - var d, g = Math.floor(f._$zS(this, c) - o); - try { - d = this._$Vs[g] - } catch (t) { - console.log("_$li :: %s / %s \t\t\t\t@@_$fS\n", t.toString(), f.getDrawDataID().toString()), g = Math.floor(f._$zS(this, c) - o); - continue - } - d == Y._$V2 ? 
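/*
 * Y.init above flattens the model into parallel arrays: parts, base data
 * (deformers) and draw data each get an index plus a per-instance context
 * object. The inner for(;;) loop that repeatedly rescans `o` is a
 * topological sort over deformer parents -- a base datum is appended to
 * _$3S only once its target parent ID is already resolved (or it has
 * none), so at update time deformers can be evaluated in one forward
 * pass.
 */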
this._$Ws[g] = p : this._$Er[d] = p, this._$Vs[g] = p - } catch (t) { - null == u && (u = t, at._$sT(at._$H7)) - } - } - null != u && Y._$_0 && _._$Rb(u), Y._$e && _.dump("_$DL"), Y._$e && _.start("_$eL"); - for (var i = this._$Js.length - 1; i >= 0; i--) this._$Js[i] = Y._$jr; - return this._$QT = !1, Y._$e && _.dump("_$eL"), !1 - }, Y.prototype.preDraw = function(t) { - null != this.clipManager && (t._$ZT(), this.clipManager.setupClip(this, t)) - }, Y.prototype.draw = function(t) { - if (null == this._$Ws) return void _._$li("call _$Ri.update() before _$Ri.draw() "); - var i = this._$Ws.length; - t._$ZT(); - for (var e = 0; e < i; ++e) { - var r = this._$Ws[e]; - if (r != Y._$V2) for (;;) { - var o = this._$aS[r], - n = this._$8b[r]; - if (n._$yo()) { - var s = n._$IP, - a = this._$Hr[s]; - n._$VS = a.getPartsOpacity(), o.draw(t, this, n) - } - var h = this._$Er[r]; - if (h <= r || h == Y._$W0) break; - r = h - } - } - }, Y.prototype.getParamIndex = function(t) { - for (var i = this._$pb.length - 1; i >= 0; --i) if (this._$pb[i] == t) return i; - return this._$02(t, 0, Y._$tr, Y._$lr) - }, Y.prototype._$BS = function(t) { - return this.getBaseDataIndex(t) - }, Y.prototype.getBaseDataIndex = function(t) { - for (var i = this._$3S.length - 1; i >= 0; --i) if (null != this._$3S[i] && this._$3S[i].getBaseDataID() == t) return i; - return -1 - }, Y.prototype._$UT = function(t, i) { - var e = new Float32Array(i); - return w._$jT(t, 0, e, 0, t.length), e - }, Y.prototype._$02 = function(t, i, e, r) { - if (this._$qo >= this._$pb.length) { - var o = this._$pb.length, - n = new Array(2 * o); - w._$jT(this._$pb, 0, n, 0, o), this._$pb = n, this._$_2 = this._$UT(this._$_2, 2 * o), this._$vr = this._$UT(this._$vr, 2 * o), this._$Rr = this._$UT(this._$Rr, 2 * o), this._$Or = this._$UT(this._$Or, 2 * o); - var s = new Array; - w._$jT(this._$Js, 0, s, 0, o), this._$Js = s - } - return this._$pb[this._$qo] = t, this._$_2[this._$qo] = i, this._$vr[this._$qo] = i, this._$Rr[this._$qo] = e, this._$Or[this._$qo] = r, this._$Js[this._$qo] = Y._$ZS, this._$qo++ - }, Y.prototype._$Zo = function(t, i) { - this._$3S[t] = i - }, Y.prototype.setParamFloat = function(t, i) { - i < this._$Rr[t] && (i = this._$Rr[t]), i > this._$Or[t] && (i = this._$Or[t]), this._$_2[t] = i - }, Y.prototype.loadParam = function() { - var t = this._$_2.length; - t > this._$fs.length && (t = this._$fs.length), w._$jT(this._$fs, 0, this._$_2, 0, t) - }, Y.prototype.saveParam = function() { - var t = this._$_2.length; - t > this._$fs.length && (this._$fs = new Float32Array(t)), w._$jT(this._$_2, 0, this._$fs, 0, t) - }, Y.prototype._$v2 = function() { - return this._$co - }, Y.prototype._$WS = function() { - return this._$QT - }, Y.prototype._$Xb = function(t) { - return this._$Js[t] == Y._$ZS - }, Y.prototype._$vs = function() { - return this._$Es - }, Y.prototype._$Tr = function() { - return this._$ZP - }, Y.prototype.getBaseData = function(t) { - return this._$3S[t] - }, Y.prototype.getParamFloat = function(t) { - return this._$_2[t] - }, Y.prototype.getParamMax = function(t) { - return this._$Or[t] - }, Y.prototype.getParamMin = function(t) { - return this._$Rr[t] - }, Y.prototype.setPartsOpacity = function(t, i) { - this._$Hr[t].setPartsOpacity(i) - }, Y.prototype.getPartsOpacity = function(t) { - return this._$Hr[t].getPartsOpacity() - }, Y.prototype.getPartsDataIndex = function(t) { - for (var i = this._$F2.length - 1; i >= 0; --i) if (null != this._$F2[i] && this._$F2[i]._$p2() == t) return i; - return -1 - }, Y.prototype._$q2 = 
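/*
 * update/draw above keep draw order as a bucket-plus-chain structure:
 * _$Ws[orderSlot] holds the first draw-data index for that draw-order
 * value and _$Er[index] chains later arrivals (with _$Vs tracking each
 * slot's tail), so draw() walks the slots in order and follows each
 * chain; the `h <= r` guard stops a malformed chain from looping forever.
 * _$02 is lazy parameter registration: unknown IDs are appended with
 * default/min/max values, doubling the backing arrays when full, which
 * is why getParamIndex never fails.
 */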
function(t) { - return this._$db[t] - }, Y.prototype._$C2 = function(t) { - return this._$8b[t] - }, Y.prototype._$Bb = function(t) { - return this._$Hr[t] - }, Y.prototype._$5s = function(t, i) { - for (var e = this._$Ws.length, r = t, o = 0; o < e; ++o) { - var n = this._$Ws[o]; - if (n != Y._$V2) for (;;) { - var s = this._$8b[n]; - s._$yo() && (s._$GT()._$B2(this, s, r), r += i); - var _ = this._$Er[n]; - if (_ <= n || _ == Y._$W0) break; - n = _ - } - } - }, Y.prototype.setDrawParam = function(t) { - this.dp_webgl = t - }, Y.prototype.getDrawParam = function() { - return this.dp_webgl - }, k._$0T = function(t) { - return k._$0T(new _$5(t)) - }, k._$0T = function(t) { - if (!t.exists()) throw new _$ls(t._$3b()); - for (var i, e = t.length(), r = new Int8Array(e), o = new _$Xs(new _$kb(t), 8192), n = 0; - (i = o.read(r, n, e - n)) > 0;) n += i; - return r - }, k._$C = function(t) { - var i = null, - e = null; - try { - i = t instanceof Array ? t : new _$Xs(t, 8192), e = new _$js; - for (var r, o = new Int8Array(1e3); - (r = i.read(o)) > 0;) e.write(o, 0, r); - return e._$TS() - } finally { - null != t && t.close(), null != e && (e.flush(), e.close()) - } - }, V.prototype._$T2 = function() { - return w.getUserTimeMSec() + Math._$10() * (2 * this._$Br - 1) - }, V.prototype._$uo = function(t) { - this._$Br = t - }, V.prototype._$QS = function(t, i, e) { - this._$Dr = t, this._$Cb = i, this._$mr = e - }, V.prototype._$7T = function(t) { - var i, e = w.getUserTimeMSec(), - r = 0; - switch (this._$_L) { - case STATE_CLOSING: - r = (e - this._$bb) / this._$Dr, r >= 1 && (r = 1, this._$_L = wt.STATE_CLOSED, this._$bb = e), i = 1 - r; - break; - case STATE_CLOSED: - r = (e - this._$bb) / this._$Cb, r >= 1 && (this._$_L = wt.STATE_OPENING, this._$bb = e), i = 0; - break; - case STATE_OPENING: - r = (e - this._$bb) / this._$mr, r >= 1 && (r = 1, this._$_L = wt.STATE_INTERVAL, this._$12 = this._$T2()), i = r; - break; - case STATE_INTERVAL: - this._$12 < e && (this._$_L = wt.STATE_CLOSING, this._$bb = e), i = 1; - break; - case STATE_FIRST: - default: - this._$_L = wt.STATE_INTERVAL, this._$12 = this._$T2(), i = 1 - } - this._$jo || (i = -i), t.setParamFloat(this._$iL, i), t.setParamFloat(this._$0L, i) - }; - var wt = function() {}; - wt.STATE_FIRST = "STATE_FIRST", wt.STATE_INTERVAL = "STATE_INTERVAL", wt.STATE_CLOSING = "STATE_CLOSING", wt.STATE_CLOSED = "STATE_CLOSED", wt.STATE_OPENING = "STATE_OPENING", X.prototype = new E, X._$As = 32, X._$Gr = !1, X._$NT = null, X._$vS = null, X._$no = null, X._$9r = function(t) { - return new Float32Array(t) - }, X._$vb = function(t) { - return new Int16Array(t) - }, X._$cr = function(t, i) { - return null == t || t._$yL() < i.length ? (t = X._$9r(2 * i.length), t.put(i), t._$oT(0)) : (t.clear(), t.put(i), t._$oT(0)), t - }, X._$mb = function(t, i) { - return null == t || t._$yL() < i.length ? (t = X._$vb(2 * i.length), t.put(i), t._$oT(0)) : (t.clear(), t.put(i), t._$oT(0)), t - }, X._$Hs = function() { - return X._$Gr - }, X._$as = function(t) { - X._$Gr = t - }, X.prototype.setGL = function(t) { - this.gl = t - }, X.prototype.setTransform = function(t) { - this.transform = t - }, X.prototype._$ZT = function() {}, X.prototype._$Uo = function(t, i, e, r, o, n, s, _) { - if (!(n < .01)) { - var a = this._$U2[t], - h = n > .9 ? 
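/*
 * V/wt above implement auto eye-blink as a state machine: INTERVAL waits
 * for a randomized deadline (_$T2 jitters the next blink around the _$Br
 * interval), CLOSING ramps the value 1 -> 0 over _$Dr ms, CLOSED holds
 * for _$Cb ms, OPENING ramps 0 -> 1 over _$mr ms and schedules the next
 * blink; the result is written to both eye parameters (_$iL, _$0L), and
 * inverted when _$jo is unset. Note the case labels use bare
 * STATE_CLOSING etc. while the constants live on `wt` -- this looks like
 * a minification slip, since evaluating an undeclared identifier in a
 * case expression throws at runtime.
 */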
at.EXPAND_W : 0; - this.gl.drawElements(a, e, r, o, n, h, this.transform, _) - } - }, X.prototype._$Rs = function() { - throw new Error("_$Rs") - }, X.prototype._$Ds = function(t) { - throw new Error("_$Ds") - }, X.prototype._$K2 = function() { - for (var t = 0; t < this._$sb.length; t++) { - 0 != this._$sb[t] && (this.gl._$Sr(1, this._$sb, t), this._$sb[t] = 0) - } - }, X.prototype.setTexture = function(t, i) { - this._$sb.length < t + 1 && this._$nS(t), this._$sb[t] = i - }, X.prototype.setTexture = function(t, i) { - this._$sb.length < t + 1 && this._$nS(t), this._$U2[t] = i - }, X.prototype._$nS = function(t) { - var i = Math.max(2 * this._$sb.length, t + 1 + 10), - e = new Int32Array(i); - w._$jT(this._$sb, 0, e, 0, this._$sb.length), this._$sb = e; - var r = new Array; - w._$jT(this._$U2, 0, r, 0, this._$U2.length), this._$U2 = r - }, z.prototype = new I, z._$Xo = new Float32Array(2), z._$io = new Float32Array(2), z._$0o = new Float32Array(2), z._$Lo = new Float32Array(2), z._$To = new Float32Array(2), z._$Po = new Float32Array(2), z._$gT = new Array, z.prototype._$zP = function() { - this._$GS = new D, this._$GS._$zP(), this._$Y0 = new Array - }, z.prototype.getType = function() { - return I._$c2 - }, z.prototype._$F0 = function(t) { - I.prototype._$F0.call(this, t), this._$GS = t._$nP(), this._$Y0 = t._$nP(), I.prototype.readV2_opacity.call(this, t) - }, z.prototype.init = function(t) { - var i = new H(this); - return i._$Yr = new P, this._$32() && (i._$Wr = new P), i - }, z.prototype._$Nr = function(t, i) { - this != i._$GT() && console.log("### assert!! ### "); - var e = i; - if (this._$GS._$Ur(t)) { - var r = z._$gT; - r[0] = !1; - var o = this._$GS._$Q2(t, r); - i._$Ib(r[0]), this.interpolateOpacity(t, this._$GS, i, r); - var n = t._$vs(), - s = t._$Tr(); - if (this._$GS._$zr(n, s, o), o <= 0) { - var _ = this._$Y0[n[0]]; - e._$Yr.init(_) - } else if (1 == o) { - var _ = this._$Y0[n[0]], - a = this._$Y0[n[1]], - h = s[0]; - e._$Yr._$fL = _._$fL + (a._$fL - _._$fL) * h, e._$Yr._$gL = _._$gL + (a._$gL - _._$gL) * h, e._$Yr._$B0 = _._$B0 + (a._$B0 - _._$B0) * h, e._$Yr._$z0 = _._$z0 + (a._$z0 - _._$z0) * h, e._$Yr._$qT = _._$qT + (a._$qT - _._$qT) * h - } else if (2 == o) { - var _ = this._$Y0[n[0]], - a = this._$Y0[n[1]], - l = this._$Y0[n[2]], - $ = this._$Y0[n[3]], - h = s[0], - u = s[1], - p = _._$fL + (a._$fL - _._$fL) * h, - f = l._$fL + ($._$fL - l._$fL) * h; - e._$Yr._$fL = p + (f - p) * u, p = _._$gL + (a._$gL - _._$gL) * h, f = l._$gL + ($._$gL - l._$gL) * h, e._$Yr._$gL = p + (f - p) * u, p = _._$B0 + (a._$B0 - _._$B0) * h, f = l._$B0 + ($._$B0 - l._$B0) * h, e._$Yr._$B0 = p + (f - p) * u, p = _._$z0 + (a._$z0 - _._$z0) * h, f = l._$z0 + ($._$z0 - l._$z0) * h, e._$Yr._$z0 = p + (f - p) * u, p = _._$qT + (a._$qT - _._$qT) * h, f = l._$qT + ($._$qT - l._$qT) * h, e._$Yr._$qT = p + (f - p) * u - } else if (3 == o) { - var c = this._$Y0[n[0]], - d = this._$Y0[n[1]], - g = this._$Y0[n[2]], - y = this._$Y0[n[3]], - m = this._$Y0[n[4]], - T = this._$Y0[n[5]], - P = this._$Y0[n[6]], - S = this._$Y0[n[7]], - h = s[0], - u = s[1], - v = s[2], - p = c._$fL + (d._$fL - c._$fL) * h, - f = g._$fL + (y._$fL - g._$fL) * h, - L = m._$fL + (T._$fL - m._$fL) * h, - M = P._$fL + (S._$fL - P._$fL) * h; - e._$Yr._$fL = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u), p = c._$gL + (d._$gL - c._$gL) * h, f = g._$gL + (y._$gL - g._$gL) * h, L = m._$gL + (T._$gL - m._$gL) * h, M = P._$gL + (S._$gL - P._$gL) * h, e._$Yr._$gL = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u), p = c._$B0 + 
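/*
 * X above holds the WebGL draw-parameter buffer bookkeeping: _$cr/_$mb
 * grow-and-refill typed-array buffers, and _$nS doubles the texture
 * tables. Note that setTexture is defined twice with the same arity --
 * the first stores a texture handle into _$sb, the second writes into
 * _$U2 -- and in JavaScript the second assignment silently replaces the
 * first, which suggests two distinct methods in the original source
 * collided under minification.
 */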
(d._$B0 - c._$B0) * h, f = g._$B0 + (y._$B0 - g._$B0) * h, L = m._$B0 + (T._$B0 - m._$B0) * h, M = P._$B0 + (S._$B0 - P._$B0) * h, e._$Yr._$B0 = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u), p = c._$z0 + (d._$z0 - c._$z0) * h, f = g._$z0 + (y._$z0 - g._$z0) * h, L = m._$z0 + (T._$z0 - m._$z0) * h, M = P._$z0 + (S._$z0 - P._$z0) * h, e._$Yr._$z0 = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u), p = c._$qT + (d._$qT - c._$qT) * h, f = g._$qT + (y._$qT - g._$qT) * h, L = m._$qT + (T._$qT - m._$qT) * h, M = P._$qT + (S._$qT - P._$qT) * h, e._$Yr._$qT = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u) - } else if (4 == o) { - var E = this._$Y0[n[0]], - A = this._$Y0[n[1]], - I = this._$Y0[n[2]], - w = this._$Y0[n[3]], - x = this._$Y0[n[4]], - O = this._$Y0[n[5]], - D = this._$Y0[n[6]], - R = this._$Y0[n[7]], - b = this._$Y0[n[8]], - F = this._$Y0[n[9]], - C = this._$Y0[n[10]], - N = this._$Y0[n[11]], - B = this._$Y0[n[12]], - U = this._$Y0[n[13]], - G = this._$Y0[n[14]], - Y = this._$Y0[n[15]], - h = s[0], - u = s[1], - v = s[2], - k = s[3], - p = E._$fL + (A._$fL - E._$fL) * h, - f = I._$fL + (w._$fL - I._$fL) * h, - L = x._$fL + (O._$fL - x._$fL) * h, - M = D._$fL + (R._$fL - D._$fL) * h, - V = b._$fL + (F._$fL - b._$fL) * h, - X = C._$fL + (N._$fL - C._$fL) * h, - H = B._$fL + (U._$fL - B._$fL) * h, - W = G._$fL + (Y._$fL - G._$fL) * h; - e._$Yr._$fL = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)), p = E._$gL + (A._$gL - E._$gL) * h, f = I._$gL + (w._$gL - I._$gL) * h, L = x._$gL + (O._$gL - x._$gL) * h, M = D._$gL + (R._$gL - D._$gL) * h, V = b._$gL + (F._$gL - b._$gL) * h, X = C._$gL + (N._$gL - C._$gL) * h, H = B._$gL + (U._$gL - B._$gL) * h, W = G._$gL + (Y._$gL - G._$gL) * h, e._$Yr._$gL = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)), p = E._$B0 + (A._$B0 - E._$B0) * h, f = I._$B0 + (w._$B0 - I._$B0) * h, L = x._$B0 + (O._$B0 - x._$B0) * h, M = D._$B0 + (R._$B0 - D._$B0) * h, V = b._$B0 + (F._$B0 - b._$B0) * h, X = C._$B0 + (N._$B0 - C._$B0) * h, H = B._$B0 + (U._$B0 - B._$B0) * h, W = G._$B0 + (Y._$B0 - G._$B0) * h, e._$Yr._$B0 = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)), p = E._$z0 + (A._$z0 - E._$z0) * h, f = I._$z0 + (w._$z0 - I._$z0) * h, L = x._$z0 + (O._$z0 - x._$z0) * h, M = D._$z0 + (R._$z0 - D._$z0) * h, V = b._$z0 + (F._$z0 - b._$z0) * h, X = C._$z0 + (N._$z0 - C._$z0) * h, H = B._$z0 + (U._$z0 - B._$z0) * h, W = G._$z0 + (Y._$z0 - G._$z0) * h, e._$Yr._$z0 = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)), p = E._$qT + (A._$qT - E._$qT) * h, f = I._$qT + (w._$qT - I._$qT) * h, L = x._$qT + (O._$qT - x._$qT) * h, M = D._$qT + (R._$qT - D._$qT) * h, V = b._$qT + (F._$qT - b._$qT) * h, X = C._$qT + (N._$qT - C._$qT) * h, H = B._$qT + (U._$qT - B._$qT) * h, W = G._$qT + (Y._$qT - G._$qT) * h, e._$Yr._$qT = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)) - } else { - for (var j = 0 | Math.pow(2, o), q = new Float32Array(j), J = 0; J < j; J++) { - for (var Q = J, Z = 1, K = 0; K < o; K++) Z *= Q % 2 == 0 ? 
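/* o==4 above unrolls the 16-key quadrilinear case; this final branch is the generic fallback for any o, weighting each of the 2^o corner keys J by the product over axes K of (bit K of J ? s[K] : 1-s[K]): */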
1 - s[K] : s[K], Q /= 2; - q[J] = Z - } - for (var tt = new Array, it = 0; it < j; it++) tt[it] = this._$Y0[n[it]]; - for (var et = 0, rt = 0, ot = 0, nt = 0, st = 0, it = 0; it < j; it++) et += q[it] * tt[it]._$fL, rt += q[it] * tt[it]._$gL, ot += q[it] * tt[it]._$B0, nt += q[it] * tt[it]._$z0, st += q[it] * tt[it]._$qT; - e._$Yr._$fL = et, e._$Yr._$gL = rt, e._$Yr._$B0 = ot, e._$Yr._$z0 = nt, e._$Yr._$qT = st - } - var _ = this._$Y0[n[0]]; - e._$Yr.reflectX = _.reflectX, e._$Yr.reflectY = _.reflectY - } - }, z.prototype._$2b = function(t, i) { - this != i._$GT() && console.log("### assert!! ### "); - var e = i; - if (e._$hS(!0), this._$32()) { - var r = this.getTargetBaseDataID(); - if (e._$8r == I._$ur && (e._$8r = t.getBaseDataIndex(r)), e._$8r < 0) at._$so && _._$li("_$L _$0P _$G :: %s", r), e._$hS(!1); - else { - var o = t.getBaseData(e._$8r); - if (null != o) { - var n = t._$q2(e._$8r), - s = z._$Xo; - s[0] = e._$Yr._$fL, s[1] = e._$Yr._$gL; - var a = z._$io; - a[0] = 0, a[1] = -.1; - n._$GT().getType() == I._$c2 ? a[1] = -10 : a[1] = -.1; - var h = z._$0o; - this._$Jr(t, o, n, s, a, h); - var l = Lt._$92(a, h); - o._$nb(t, n, s, s, 1, 0, 2), e._$Wr._$fL = s[0], e._$Wr._$gL = s[1], e._$Wr._$B0 = e._$Yr._$B0, e._$Wr._$z0 = e._$Yr._$z0, e._$Wr._$qT = e._$Yr._$qT - l * Lt._$NS; - var $ = n.getTotalScale(); - e.setTotalScale_notForClient($ * e._$Wr._$B0); - var u = n.getTotalOpacity(); - e.setTotalOpacity(u * e.getInterpolatedOpacity()), e._$Wr.reflectX = e._$Yr.reflectX, e._$Wr.reflectY = e._$Yr.reflectY, e._$hS(n._$yo()) - } else e._$hS(!1) - } - } else e.setTotalScale_notForClient(e._$Yr._$B0), e.setTotalOpacity(e.getInterpolatedOpacity()) - }, z.prototype._$nb = function(t, i, e, r, o, n, s) { - this != i._$GT() && console.log("### assert!! ### "); - for (var _, a, h = i, l = null != h._$Wr ? h._$Wr : h._$Yr, $ = Math.sin(Lt._$bS * l._$qT), u = Math.cos(Lt._$bS * l._$qT), p = h.getTotalScale(), f = l.reflectX ? -1 : 1, c = l.reflectY ? -1 : 1, d = u * p * f, g = -$ * p * c, y = $ * p * f, m = u * p * c, T = l._$fL, P = l._$gL, S = o * s, v = n; v < S; v += s) _ = e[v], a = e[v + 1], r[v] = d * _ + g * a + T, r[v + 1] = y * _ + m * a + P - }, z.prototype._$Jr = function(t, i, e, r, o, n) { - i != e._$GT() && console.log("### assert!! ### "); - var s = z._$Lo; - z._$Lo[0] = r[0], z._$Lo[1] = r[1], i._$nb(t, e, s, s, 1, 0, 2); - for (var _ = z._$To, a = z._$Po, h = 1, l = 0; l < 10; l++) { - if (a[0] = r[0] + h * o[0], a[1] = r[1] + h * o[1], i._$nb(t, e, a, _, 1, 0, 2), _[0] -= s[0], _[1] -= s[1], 0 != _[0] || 0 != _[1]) return n[0] = _[0], void(n[1] = _[1]); - if (a[0] = r[0] - h * o[0], a[1] = r[1] - h * o[1], i._$nb(t, e, a, _, 1, 0, 2), _[0] -= s[0], _[1] -= s[1], 0 != _[0] || 0 != _[1]) return _[0] = -_[0], _[0] = -_[0], n[0] = _[0], void(n[1] = _[1]); - h *= .1 - } - at._$so && console.log("_$L0 to transform _$SP\n") - }, H.prototype = new _t, W.prototype = new M, W._$ur = -2, W._$ES = 500, W._$wb = 2, W._$8S = 3, W._$os = 4, W._$52 = W._$ES, W._$R2 = W._$ES, W._$Sb = function(t) { - for (var i = t.length - 1; i >= 0; --i) { - var e = t[i]; - e < W._$52 ? W._$52 = e : e > W._$R2 && (W._$R2 = e) - } - }, W._$or = function() { - return W._$52 - }, W._$Pr = function() { - return W._$R2 - }, W.prototype._$F0 = function(t) { - this._$gP = t._$nP(), this._$dr = t._$nP(), this._$GS = t._$nP(), this._$qb = t._$6L(), this._$Lb = t._$cS(), this._$mS = t._$Tb(), t.getFormatVersion() >= G._$T7 ? 
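/* z._$2b places the deformer relative to its parent base data, probing the parent's local -y direction via _$Jr to recover a rotation offset; z._$nb then applies the interpolated placement as a 2x3 affine map — _$qT passes through Lt._$bS (pi/180), so it is stored in degrees. Note the apparent typo in _$Jr: `_[0] = -_[0], _[0] = -_[0]` negates the same component twice, where the second presumably should hit _[1]. Parsing resumes in W (the draw-data base class), which on newer format versions also reads a clip ID: */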
(this.clipID = t._$nP(), this.clipIDList = this.convertClipIDForV2_11(this.clipID)) : this.clipIDList = null, W._$Sb(this._$Lb) - }, W.prototype.getClipIDList = function() { - return this.clipIDList - }, W.prototype._$Nr = function(t, i) { - if (i._$IS[0] = !1, i._$Us = v._$Z2(t, this._$GS, i._$IS, this._$Lb), at._$Zs); - else if (i._$IS[0]) return; - i._$7s = v._$br(t, this._$GS, i._$IS, this._$mS) - }, W.prototype._$2b = function(t) {}, W.prototype.getDrawDataID = function() { - return this._$gP - }, W.prototype._$j2 = function(t) { - this._$gP = t - }, W.prototype.getOpacity = function(t, i) { - return i._$7s - }, W.prototype._$zS = function(t, i) { - return i._$Us - }, W.prototype.getTargetBaseDataID = function() { - return this._$dr - }, W.prototype._$gs = function(t) { - this._$dr = t - }, W.prototype._$32 = function() { - return null != this._$dr && this._$dr != yt._$2o() - }, W.prototype.getType = function() {}, j._$42 = 0, j.prototype._$1b = function() { - return this._$3S - }, j.prototype.getDrawDataList = function() { - return this._$aS - }, j.prototype._$F0 = function(t) { - this._$NL = t._$nP(), this._$aS = t._$nP(), this._$3S = t._$nP() - }, j.prototype._$kr = function(t) { - t._$Zo(this._$3S), t._$xo(this._$aS), this._$3S = null, this._$aS = null - }, q.prototype = new i, q.loadModel = function(t) { - var e = new q; - return i._$62(e, t), e - }, q.loadModel = function(t) { - var e = new q; - return i._$62(e, t), e - }, q._$to = function() { - return new q - }, q._$er = function(t) { - var i = new _$5("../_$_r/_$t0/_$Ri/_$_P._$d"); - if (0 == i.exists()) throw new _$ls("_$t0 _$_ _$6 _$Ui :: " + i._$PL()); - for (var e = ["../_$_r/_$t0/_$Ri/_$_P.512/_$CP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$vP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$EP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$pP._$1"], r = q.loadModel(i._$3b()), o = 0; o < e.length; o++) { - var n = new _$5(e[o]); - if (0 == n.exists()) throw new _$ls("_$t0 _$_ _$6 _$Ui :: " + n._$PL()); - r.setTexture(o, _$nL._$_o(t, n._$3b())) - } - return r - }, q.prototype.setGL = function(t) { - this._$zo.setGL(t) - }, q.prototype.setTransform = function(t) { - this._$zo.setTransform(t) - }, q.prototype.draw = function() { - this._$5S.draw(this._$zo) - }, q.prototype._$K2 = function() { - this._$zo._$K2() - }, q.prototype.setTexture = function(t, i) { - null == this._$zo && _._$li("_$Yi for QT _$ki / _$XS() is _$6 _$ui!!"), this._$zo.setTexture(t, i) - }, q.prototype.setTexture = function(t, i) { - null == this._$zo && _._$li("_$Yi for QT _$ki / _$XS() is _$6 _$ui!!"), this._$zo.setTexture(t, i) - }, q.prototype._$Rs = function() { - return this._$zo._$Rs() - }, q.prototype._$Ds = function(t) { - this._$zo._$Ds(t) - }, q.prototype.getDrawParam = function() { - return this._$zo - }, J.prototype = new s, J._$cs = "VISIBLE:", J._$ar = "LAYOUT:", J.MTN_PREFIX_FADEIN = "FADEIN:", J.MTN_PREFIX_FADEOUT = "FADEOUT:", J._$Co = 0, J._$1T = 1, J.loadMotion = function(t) { - var i = k._$C(t); - return J.loadMotion(i) - }, J.loadMotion = function(t) { - t instanceof ArrayBuffer && (t = new DataView(t)); - var i = new J, - e = [0], - r = t.byteLength; - i._$yT = 0; - for (var o = 0; o < r; ++o) { - var n = Q(t, o), - s = n.charCodeAt(0); - if ("\n" != n && "\r" != n) if ("#" != n) if ("$" != n) { - if (97 <= s && s <= 122 || 65 <= s && s <= 90 || "_" == n) { - for (var _ = o, a = -1; o < r && ("\r" != (n = Q(t, o)) && "\n" != n); ++o) if ("=" == n) { - a = o; - break - } - if (a >= 0) { - var h = new B; - O.startsWith(t, _, J._$cs) ? 
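/* J.loadMotion parses the plain-text .mtn motion format: '#' starts a comment, a '$fps=N' directive sets the frame rate, and 'ParamID=v0,v1,...' lines define per-frame curves; the J._$cs ("VISIBLE:") and J._$ar ("LAYOUT:") prefixes mark special track types, dispatched here: */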
(h._$RP = B._$hs, h._$4P = O.createString(t, _, a - _)) : O.startsWith(t, _, J._$ar) ? (h._$4P = O.createString(t, _ + 7, a - _ - 7), O.startsWith(t, _ + 7, "ANCHOR_X") ? h._$RP = B._$xs : O.startsWith(t, _ + 7, "ANCHOR_Y") ? h._$RP = B._$us : O.startsWith(t, _ + 7, "SCALE_X") ? h._$RP = B._$qs : O.startsWith(t, _ + 7, "SCALE_Y") ? h._$RP = B._$Ys : O.startsWith(t, _ + 7, "X") ? h._$RP = B._$ws : O.startsWith(t, _ + 7, "Y") && (h._$RP = B._$Ns)) : (h._$RP = B._$Fr, h._$4P = O.createString(t, _, a - _)), i.motions.push(h); - var l = 0, - $ = []; - for (o = a + 1; o < r && ("\r" != (n = Q(t, o)) && "\n" != n); ++o) if ("," != n && " " != n && "\t" != n) { - var u = O._$LS(t, r, o, e); - if (e[0] > 0) { - $.push(u), l++; - var p = e[0]; - if (p < o) { - console.log("_$n0 _$hi . @Live2DMotion loadMotion()\n"); - break - } - o = p - 1 - } - } - h._$I0 = new Float32Array($), l > i._$yT && (i._$yT = l) - } - } - } else { - for (var _ = o, a = -1; o < r && ("\r" != (n = Q(t, o)) && "\n" != n); ++o) if ("=" == n) { - a = o; - break - } - var f = !1; - if (a >= 0) for (a == _ + 4 && "f" == Q(t, _ + 1) && "p" == Q(t, _ + 2) && "s" == Q(t, _ + 3) && (f = !0), o = a + 1; o < r && ("\r" != (n = Q(t, o)) && "\n" != n); ++o) if ("," != n && " " != n && "\t" != n) { - var u = O._$LS(t, r, o, e); - e[0] > 0 && f && 5 < u && u < 121 && (i._$D0 = u), o = e[0] - } - for (; o < r && ("\n" != Q(t, o) && "\r" != Q(t, o)); ++o); - } else for (; o < r && ("\n" != Q(t, o) && "\r" != Q(t, o)); ++o); - } - return i._$rr = 1e3 * i._$yT / i._$D0 | 0, i - }, J.prototype.getDurationMSec = function() { - return this._$E ? -1 : this._$rr - }, J.prototype.getLoopDurationMSec = function() { - return this._$rr - }, J.prototype.dump = function() { - for (var t = 0; t < this.motions.length; t++) { - var i = this.motions[t]; - console.log("_$wL[%s] [%d]. ", i._$4P, i._$I0.length); - for (var e = 0; e < i._$I0.length && e < 10; e++) console.log("%5.2f ,", i._$I0[e]); - console.log("\n") - } - }, J.prototype.updateParamExe = function(t, i, e, r) { - for (var o = i - r._$z2, n = o * this._$D0 / 1e3, s = 0 | n, _ = n - s, a = 0; a < this.motions.length; a++) { - var h = this.motions[a], - l = h._$I0.length, - $ = h._$4P; - if (h._$RP == B._$hs) { - var u = h._$I0[s >= l ? l - 1 : s]; - t.setParamFloat($, u) - } else if (B._$ws <= h._$RP && h._$RP <= B._$Ys); - else { - var p, f = t.getParamIndex($), - c = t.getModelContext(), - d = c.getParamMax(f), - g = c.getParamMin(f), - y = .4 * (d - g), - m = c.getParamFloat(f), - T = h._$I0[s >= l ? l - 1 : s], - P = h._$I0[s + 1 >= l ? l - 1 : s + 1]; - p = T < P && P - T > y || T > P && T - P > y ? T : T + (P - T) * _; - var S = m + (p - m) * e; - t.setParamFloat($, S) - } - } - s >= this._$yT && (this._$E ? 
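/* updateParamExe samples each curve at frame n = elapsedMs * fps / 1000, blending frames s and s+1 by the fractional part; the 0.4*(max-min) guard skips interpolation across jumps larger than 40% of a parameter's range, and e is the fade weight mixing the motion value toward the current one. Loop/finish bookkeeping continues: */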
(r._$z2 = i, this.loopFadeIn && (r._$bs = i)) : r._$9L = !0), this._$eP = e - }, J.prototype._$r0 = function() { - return this._$E - }, J.prototype._$aL = function(t) { - this._$E = t - }, J.prototype._$S0 = function() { - return this._$D0 - }, J.prototype._$U0 = function(t) { - this._$D0 = t - }, J.prototype.isLoopFadeIn = function() { - return this.loopFadeIn - }, J.prototype.setLoopFadeIn = function(t) { - this.loopFadeIn = t - }, N.prototype.clear = function() { - this.size = 0 - }, N.prototype.add = function(t) { - if (this._$P.length <= this.size) { - var i = new Float32Array(2 * this.size); - w._$jT(this._$P, 0, i, 0, this.size), this._$P = i - } - this._$P[this.size++] = t - }, N.prototype._$BL = function() { - var t = new Float32Array(this.size); - return w._$jT(this._$P, 0, t, 0, this.size), t - }, B._$Fr = 0, B._$hs = 1, B._$ws = 100, B._$Ns = 101, B._$xs = 102, B._$us = 103, B._$qs = 104, B._$Ys = 105, Z.prototype = new I, Z._$gT = new Array, Z.prototype._$zP = function() { - this._$GS = new D, this._$GS._$zP() - }, Z.prototype._$F0 = function(t) { - I.prototype._$F0.call(this, t), this._$A = t._$6L(), this._$o = t._$6L(), this._$GS = t._$nP(), this._$Eo = t._$nP(), I.prototype.readV2_opacity.call(this, t) - }, Z.prototype.init = function(t) { - var i = new K(this), - e = (this._$o + 1) * (this._$A + 1); - return null != i._$Cr && (i._$Cr = null), i._$Cr = new Float32Array(2 * e), null != i._$hr && (i._$hr = null), this._$32() ? i._$hr = new Float32Array(2 * e) : i._$hr = null, i - }, Z.prototype._$Nr = function(t, i) { - var e = i; - if (this._$GS._$Ur(t)) { - var r = this._$VT(), - o = Z._$gT; - o[0] = !1, v._$Vr(t, this._$GS, o, r, this._$Eo, e._$Cr, 0, 2), i._$Ib(o[0]), this.interpolateOpacity(t, this._$GS, i, o) - } - }, Z.prototype._$2b = function(t, i) { - var e = i; - if (e._$hS(!0), this._$32()) { - var r = this.getTargetBaseDataID(); - if (e._$8r == I._$ur && (e._$8r = t.getBaseDataIndex(r)), e._$8r < 0) at._$so && _._$li("_$L _$0P _$G :: %s", r), e._$hS(!1); - else { - var o = t.getBaseData(e._$8r), - n = t._$q2(e._$8r); - if (null != o && n._$yo()) { - var s = n.getTotalScale(); - e.setTotalScale_notForClient(s); - var a = n.getTotalOpacity(); - e.setTotalOpacity(a * e.getInterpolatedOpacity()), o._$nb(t, n, e._$Cr, e._$hr, this._$VT(), 0, 2), e._$hS(!0) - } else e._$hS(!1) - } - } else e.setTotalOpacity(e.getInterpolatedOpacity()) - }, Z.prototype._$nb = function(t, i, e, r, o, n, s) { - var _ = i, - a = null != _._$hr ? 
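/* Z appears to be the grid ("warp") deformer: _$o columns by _$A rows, hence (_$o+1)*(_$A+1) control points; _$Cr holds the interpolated points and _$hr the parent-transformed copy, preferred when present — as in the ternary here: */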
_._$hr : _._$Cr; - Z.transformPoints_sdk2(e, r, o, n, s, a, this._$o, this._$A) - }, Z.transformPoints_sdk2 = function(i, e, r, o, n, s, _, a) { - for (var h, l, $, u = r * n, p = 0, f = 0, c = 0, d = 0, g = 0, y = 0, m = !1, T = o; T < u; T += n) { - var P, S, v, L; - if (v = i[T], L = i[T + 1], P = v * _, S = L * a, P < 0 || S < 0 || _ <= P || a <= S) { - var M = _ + 1; - if (!m) { - m = !0, p = .25 * (s[2 * (0 + 0 * M)] + s[2 * (_ + 0 * M)] + s[2 * (0 + a * M)] + s[2 * (_ + a * M)]), f = .25 * (s[2 * (0 + 0 * M) + 1] + s[2 * (_ + 0 * M) + 1] + s[2 * (0 + a * M) + 1] + s[2 * (_ + a * M) + 1]); - var E = s[2 * (_ + a * M)] - s[2 * (0 + 0 * M)], - A = s[2 * (_ + a * M) + 1] - s[2 * (0 + 0 * M) + 1], - I = s[2 * (_ + 0 * M)] - s[2 * (0 + a * M)], - w = s[2 * (_ + 0 * M) + 1] - s[2 * (0 + a * M) + 1]; - c = .5 * (E + I), d = .5 * (A + w), g = .5 * (E - I), y = .5 * (A - w), p -= .5 * (c + g), f -= .5 * (d + y) - } - if (-2 < v && v < 3 && -2 < L && L < 3) if (v <= 0) if (L <= 0) { - var x = s[2 * (0 + 0 * M)], - O = s[2 * (0 + 0 * M) + 1], - D = p - 2 * c, - R = f - 2 * d, - b = p - 2 * g, - F = f - 2 * y, - C = p - 2 * c - 2 * g, - N = f - 2 * d - 2 * y, - B = .5 * (v - -2), - U = .5 * (L - -2); - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (L >= 1) { - var b = s[2 * (0 + a * M)], - F = s[2 * (0 + a * M) + 1], - C = p - 2 * c + 1 * g, - N = f - 2 * d + 1 * y, - x = p + 3 * g, - O = f + 3 * y, - D = p - 2 * c + 3 * g, - R = f - 2 * d + 3 * y, - B = .5 * (v - -2), - U = .5 * (L - 1); - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else { - var G = 0 | S; - G == a && (G = a - 1); - var B = .5 * (v - -2), - U = S - G, - Y = G / a, - k = (G + 1) / a, - b = s[2 * (0 + G * M)], - F = s[2 * (0 + G * M) + 1], - x = s[2 * (0 + (G + 1) * M)], - O = s[2 * (0 + (G + 1) * M) + 1], - C = p - 2 * c + Y * g, - N = f - 2 * d + Y * y, - D = p - 2 * c + k * g, - R = f - 2 * d + k * y; - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (1 <= v) if (L <= 0) { - var D = s[2 * (_ + 0 * M)], - R = s[2 * (_ + 0 * M) + 1], - x = p + 3 * c, - O = f + 3 * d, - C = p + 1 * c - 2 * g, - N = f + 1 * d - 2 * y, - b = p + 3 * c - 2 * g, - F = f + 3 * d - 2 * y, - B = .5 * (v - 1), - U = .5 * (L - -2); - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (L >= 1) { - var C = s[2 * (_ + a * M)], - N = s[2 * (_ + a * M) + 1], - b = p + 3 * c + 1 * g, - F = f + 3 * d + 1 * y, - D = p + 1 * c + 3 * g, - R = f + 1 * d + 3 * y, - x = p + 3 * c + 3 * g, - O = f + 3 * d + 3 * y, - B = .5 * (v - 1), - U = .5 * (L - 1); - B + U <= 1 ? 
(e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else { - var G = 0 | S; - G == a && (G = a - 1); - var B = .5 * (v - 1), - U = S - G, - Y = G / a, - k = (G + 1) / a, - C = s[2 * (_ + G * M)], - N = s[2 * (_ + G * M) + 1], - D = s[2 * (_ + (G + 1) * M)], - R = s[2 * (_ + (G + 1) * M) + 1], - b = p + 3 * c + Y * g, - F = f + 3 * d + Y * y, - x = p + 3 * c + k * g, - O = f + 3 * d + k * y; - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (L <= 0) { - var V = 0 | P; - V == _ && (V = _ - 1); - var B = P - V, - U = .5 * (L - -2), - X = V / _, - z = (V + 1) / _, - D = s[2 * (V + 0 * M)], - R = s[2 * (V + 0 * M) + 1], - x = s[2 * (V + 1 + 0 * M)], - O = s[2 * (V + 1 + 0 * M) + 1], - C = p + X * c - 2 * g, - N = f + X * d - 2 * y, - b = p + z * c - 2 * g, - F = f + z * d - 2 * y; - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (L >= 1) { - var V = 0 | P; - V == _ && (V = _ - 1); - var B = P - V, - U = .5 * (L - 1), - X = V / _, - z = (V + 1) / _, - C = s[2 * (V + a * M)], - N = s[2 * (V + a * M) + 1], - b = s[2 * (V + 1 + a * M)], - F = s[2 * (V + 1 + a * M) + 1], - D = p + X * c + 3 * g, - R = f + X * d + 3 * y, - x = p + z * c + 3 * g, - O = f + z * d + 3 * y; - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else t.err.printf("_$li calc : %.4f , %.4f\t\t\t\t\t@@BDBoxGrid\n", v, L); - else e[T] = p + v * c + L * g, e[T + 1] = f + v * d + L * y - } else l = P - (0 | P), $ = S - (0 | S), h = 2 * ((0 | P) + (0 | S) * (_ + 1)), l + $ < 1 ? (e[T] = s[h] * (1 - l - $) + s[h + 2] * l + s[h + 2 * (_ + 1)] * $, e[T + 1] = s[h + 1] * (1 - l - $) + s[h + 3] * l + s[h + 2 * (_ + 1) + 1] * $) : (e[T] = s[h + 2 * (_ + 1) + 2] * (l - 1 + $) + s[h + 2 * (_ + 1)] * (1 - l) + s[h + 2] * (1 - $), e[T + 1] = s[h + 2 * (_ + 1) + 3] * (l - 1 + $) + s[h + 2 * (_ + 1) + 1] * (1 - l) + s[h + 3] * (1 - $)) - } - }, Z.prototype.transformPoints_sdk1 = function(t, i, e, r, o, n, s) { - for (var _, a, h, l, $, u, p, f = i, c = this._$o, d = this._$A, g = o * s, y = null != f._$hr ? f._$hr : f._$Cr, m = n; m < g; m += s) at._$ts ? (_ = e[m], a = e[m + 1], _ < 0 ? _ = 0 : _ > 1 && (_ = 1), a < 0 ? a = 0 : a > 1 && (a = 1), _ *= c, a *= d, h = 0 | _, l = 0 | a, h > c - 1 && (h = c - 1), l > d - 1 && (l = d - 1), u = _ - h, p = a - l, $ = 2 * (h + l * (c + 1))) : (_ = e[m] * c, a = e[m + 1] * d, u = _ - (0 | _), p = a - (0 | a), $ = 2 * ((0 | _) + (0 | a) * (c + 1))), u + p < 1 ? 
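/* transformPoints_sdk2 above maps points through the grid bilinearly and, for points outside [0,1], extrapolates from an affine fit of the four grid corners. transformPoints_sdk1 (here) is the simpler SDK1 path: clamp into the containing cell and interpolate barycentrically over one of the cell's two triangles — the u+p<1 test just before this comment picks the lower-left one: */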
(r[m] = y[$] * (1 - u - p) + y[$ + 2] * u + y[$ + 2 * (c + 1)] * p, r[m + 1] = y[$ + 1] * (1 - u - p) + y[$ + 3] * u + y[$ + 2 * (c + 1) + 1] * p) : (r[m] = y[$ + 2 * (c + 1) + 2] * (u - 1 + p) + y[$ + 2 * (c + 1)] * (1 - u) + y[$ + 2] * (1 - p), r[m + 1] = y[$ + 2 * (c + 1) + 3] * (u - 1 + p) + y[$ + 2 * (c + 1) + 1] * (1 - u) + y[$ + 3] * (1 - p)) - }, Z.prototype._$VT = function() { - return (this._$o + 1) * (this._$A + 1) - }, Z.prototype.getType = function() { - return I._$_b - }, K.prototype = new _t, tt._$42 = 0, tt.prototype._$zP = function() { - this._$3S = new Array, this._$aS = new Array - }, tt.prototype._$F0 = function(t) { - this._$g0 = t._$8L(), this.visible = t._$8L(), this._$NL = t._$nP(), this._$3S = t._$nP(), this._$aS = t._$nP() - }, tt.prototype.init = function(t) { - var i = new it(this); - return i.setPartsOpacity(this.isVisible() ? 1 : 0), i - }, tt.prototype._$6o = function(t) { - if (null == this._$3S) throw new Error("_$3S _$6 _$Wo@_$6o"); - this._$3S.push(t) - }, tt.prototype._$3o = function(t) { - if (null == this._$aS) throw new Error("_$aS _$6 _$Wo@_$3o"); - this._$aS.push(t) - }, tt.prototype._$Zo = function(t) { - this._$3S = t - }, tt.prototype._$xo = function(t) { - this._$aS = t - }, tt.prototype.isVisible = function() { - return this.visible - }, tt.prototype._$uL = function() { - return this._$g0 - }, tt.prototype._$KP = function(t) { - this.visible = t - }, tt.prototype._$ET = function(t) { - this._$g0 = t - }, tt.prototype.getBaseData = function() { - return this._$3S - }, tt.prototype.getDrawData = function() { - return this._$aS - }, tt.prototype._$p2 = function() { - return this._$NL - }, tt.prototype._$ob = function(t) { - this._$NL = t - }, tt.prototype.getPartsID = function() { - return this._$NL - }, tt.prototype._$MP = function(t) { - this._$NL = t - }, it.prototype = new $, it.prototype.getPartsOpacity = function() { - return this._$VS - }, it.prototype.setPartsOpacity = function(t) { - this._$VS = t - }, et._$L7 = function() { - u._$27(), yt._$27(), b._$27(), l._$27() - }, et.prototype.toString = function() { - return this.id - }, rt.prototype._$F0 = function(t) {}, ot.prototype._$1s = function() { - return this._$4S - }, ot.prototype._$zP = function() { - this._$4S = new Array - }, ot.prototype._$F0 = function(t) { - this._$4S = t._$nP() - }, ot.prototype._$Ks = function(t) { - this._$4S.push(t) - }, nt.tr = new gt, nt._$50 = new gt, nt._$Ti = new Array(0, 0), nt._$Pi = new Array(0, 0), nt._$B = new Array(0, 0), nt.prototype._$lP = function(t, i, e, r) { - this.viewport = new Array(t, i, e, r) - }, nt.prototype._$bL = function() { - this.context.save(); - var t = this.viewport; - null != t && (this.context.beginPath(), this.context._$Li(t[0], t[1], t[2], t[3]), this.context.clip()) - }, nt.prototype._$ei = function() { - this.context.restore() - }, nt.prototype.drawElements = function(t, i, e, r, o, n, s, a) { - try { - o != this._$Qo && (this._$Qo = o, this.context.globalAlpha = o); - for (var h = i.length, l = t.width, $ = t.height, u = this.context, p = this._$xP, f = this._$uP, c = this._$6r, d = this._$3r, g = nt.tr, y = nt._$Ti, m = nt._$Pi, T = nt._$B, P = 0; P < h; P += 3) { - u.save(); - var S = i[P], - v = i[P + 1], - L = i[P + 2], - M = p + c * e[2 * S], - E = f + d * e[2 * S + 1], - A = p + c * e[2 * v], - I = f + d * e[2 * v + 1], - w = p + c * e[2 * L], - x = f + d * e[2 * L + 1]; - s && (s._$PS(M, E, T), M = T[0], E = T[1], s._$PS(A, I, T), A = T[0], I = T[1], s._$PS(w, x, T), w = T[0], x = T[1]); - var O = l * r[2 * S], - D 
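/* nt is the canvas-2D fallback renderer: drawElements walks the index buffer one triangle at a time, computing screen vertices (M,E / A,I / w,x) and texture vertices (O,D / R,b / F,C), then builds an affine map (translate/rotate/shear/scale below) that carries texture space onto screen space so each triangle can be rendered with clip() + drawImage(): */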
= $ - $ * r[2 * S + 1], - R = l * r[2 * v], - b = $ - $ * r[2 * v + 1], - F = l * r[2 * L], - C = $ - $ * r[2 * L + 1], - N = Math.atan2(b - D, R - O), - B = Math.atan2(I - E, A - M), - U = A - M, - G = I - E, - Y = Math.sqrt(U * U + G * G), - k = R - O, - V = b - D, - X = Math.sqrt(k * k + V * V), - z = Y / X; - It._$ni(F, C, O, D, R - O, b - D, -(b - D), R - O, y), It._$ni(w, x, M, E, A - M, I - E, -(I - E), A - M, m); - var H = (m[0] - y[0]) / y[1], - W = Math.min(O, R, F), - j = Math.max(O, R, F), - q = Math.min(D, b, C), - J = Math.max(D, b, C), - Q = Math.floor(W), - Z = Math.floor(q), - K = Math.ceil(j), - tt = Math.ceil(J); - g.identity(), g.translate(M, E), g.rotate(B), g.scale(1, m[1] / y[1]), g.shear(H, 0), g.scale(z, z), g.rotate(-N), g.translate(-O, -D), g.setContext(u); - if (n || (n = 1.2), at.IGNORE_EXPAND && (n = 0), at.USE_CACHED_POLYGON_IMAGE) { - var it = a._$e0; - if (it.gl_cacheImage = it.gl_cacheImage || {}, !it.gl_cacheImage[P]) { - var et = nt.createCanvas(K - Q, tt - Z); - at.DEBUG_DATA.LDGL_CANVAS_MB = at.DEBUG_DATA.LDGL_CANVAS_MB || 0, at.DEBUG_DATA.LDGL_CANVAS_MB += (K - Q) * (tt - Z) * 4; - var rt = et.getContext("2d"); - rt.translate(-Q, -Z), nt.clip(rt, g, n, Y, O, D, R, b, F, C, M, E, A, I, w, x), rt.drawImage(t, 0, 0), it.gl_cacheImage[P] = { - cacheCanvas: et, - cacheContext: rt - } - } - u.drawImage(it.gl_cacheImage[P].cacheCanvas, Q, Z) - } else at.IGNORE_CLIP || nt.clip(u, g, n, Y, O, D, R, b, F, C, M, E, A, I, w, x), at.USE_ADJUST_TRANSLATION && (W = 0, j = l, q = 0, J = $), u.drawImage(t, W, q, j - W, J - q, W, q, j - W, J - q); - u.restore() - } - } catch (t) { - _._$Rb(t) - } - }, nt.clip = function(t, i, e, r, o, n, s, _, a, h, l, $, u, p, f, c) { - e > .02 ? nt.expandClip(t, i, e, r, l, $, u, p, f, c) : nt.clipWithTransform(t, null, o, n, s, _, a, h) - }, nt.expandClip = function(t, i, e, r, o, n, s, _, a, h) { - var l = s - o, - $ = _ - n, - u = a - o, - p = h - n, - f = l * p - $ * u > 0 ? 
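/* nt.clip dispatches on the expansion width: above ~0.02 it calls expandClip, which offsets every triangle edge outward by e pixels — the sign f, computed from the cross product here, follows the triangle's winding — to hide hairline seams between adjacent triangles: */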
e : -e, - c = -$, - d = l, - g = a - s, - y = h - _, - m = -y, - T = g, - P = Math.sqrt(g * g + y * y), - S = -p, - v = u, - L = Math.sqrt(u * u + p * p), - M = o - f * c / r, - E = n - f * d / r, - A = s - f * c / r, - I = _ - f * d / r, - w = s - f * m / P, - x = _ - f * T / P, - O = a - f * m / P, - D = h - f * T / P, - R = o + f * S / L, - b = n + f * v / L, - F = a + f * S / L, - C = h + f * v / L, - N = nt._$50; - return null != i._$P2(N) && (nt.clipWithTransform(t, N, M, E, A, I, w, x, O, D, F, C, R, b), !0) - }, nt.clipWithTransform = function(t, i, e, r, o, n, s, a) { - if (arguments.length < 7) return void _._$li("err : @LDGL.clip()"); - if (!(arguments[1] instanceof gt)) return void _._$li("err : a[0] is _$6 LDTransform @LDGL.clip()"); - var h = nt._$B, - l = i, - $ = arguments; - if (t.beginPath(), l) { - l._$PS($[2], $[3], h), t.moveTo(h[0], h[1]); - for (var u = 4; u < $.length; u += 2) l._$PS($[u], $[u + 1], h), t.lineTo(h[0], h[1]) - } else { - t.moveTo($[2], $[3]); - for (var u = 4; u < $.length; u += 2) t.lineTo($[u], $[u + 1]) - } - t.clip() - }, nt.createCanvas = function(t, i) { - var e = document.createElement("canvas"); - return e.setAttribute("width", t), e.setAttribute("height", i), e || _._$li("err : " + e), e - }, nt.dumpValues = function() { - for (var t = "", i = 0; i < arguments.length; i++) t += "[" + i + "]= " + arguments[i].toFixed(3) + " , "; - console.log(t) - }, st.prototype._$F0 = function(t) { - this._$TT = t._$_T(), this._$LT = t._$_T(), this._$FS = t._$_T(), this._$wL = t._$nP() - }, st.prototype.getMinValue = function() { - return this._$TT - }, st.prototype.getMaxValue = function() { - return this._$LT - }, st.prototype.getDefaultValue = function() { - return this._$FS - }, st.prototype.getParamID = function() { - return this._$wL - }, _t.prototype._$yo = function() { - return this._$AT && !this._$JS - }, _t.prototype._$hS = function(t) { - this._$AT = t - }, _t.prototype._$GT = function() { - return this._$e0 - }, _t.prototype._$l2 = function(t) { - this._$IP = t - }, _t.prototype.getPartsIndex = function() { - return this._$IP - }, _t.prototype._$x2 = function() { - return this._$JS - }, _t.prototype._$Ib = function(t) { - this._$JS = t - }, _t.prototype.getTotalScale = function() { - return this.totalScale - }, _t.prototype.setTotalScale_notForClient = function(t) { - this.totalScale = t - }, _t.prototype.getInterpolatedOpacity = function() { - return this._$7s - }, _t.prototype.setInterpolatedOpacity = function(t) { - this._$7s = t - }, _t.prototype.getTotalOpacity = function(t) { - return this.totalOpacity - }, _t.prototype.setTotalOpacity = function(t) { - this.totalOpacity = t - }, at._$2s = "2.1.00_1", at._$Kr = 201001e3, at._$sP = !0, at._$so = !0, at._$cb = !1, at._$3T = !0, at._$Ts = !0, at._$fb = !0, at._$ts = !0, at.L2D_DEFORMER_EXTEND = !0, at._$Wb = !1; - at._$yr = !1, at._$Zs = !1, at.L2D_NO_ERROR = 0, at._$i7 = 1e3, at._$9s = 1001, at._$es = 1100, at._$r7 = 2e3, at._$07 = 2001, at._$b7 = 2002, at._$H7 = 4e3, at.L2D_COLOR_BLEND_MODE_MULT = 0, at.L2D_COLOR_BLEND_MODE_ADD = 1, at.L2D_COLOR_BLEND_MODE_INTERPOLATE = 2, at._$6b = !0, at._$cT = 0, at.clippingMaskBufferSize = 256, at.glContext = new Array, at.frameBuffers = new Array, at.fTexture = new Array, at.IGNORE_CLIP = !1, at.IGNORE_EXPAND = !1, at.EXPAND_W = 2, at.USE_ADJUST_TRANSLATION = !0, at.USE_CANVAS_TRANSFORM = !0, at.USE_CACHED_POLYGON_IMAGE = !1, at.DEBUG_DATA = {}, at.PROFILE_IOS_SPEED = { - PROFILE_NAME: "iOS Speed", - USE_ADJUST_TRANSLATION: !0, - 
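/* at gathers the Live2D globals: version constants, error codes, blend modes, the clipping-mask buffer size, and the per-platform rendering profiles being defined here; at.initProfile later selects one by user-agent sniffing, toggling options like cached polygon images and the clip-expansion width EXPAND_W: */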
USE_CACHED_POLYGON_IMAGE: !0, - EXPAND_W: 4 - }, at.PROFILE_IOS_QUALITY = { - PROFILE_NAME: "iOS HiQ", - USE_ADJUST_TRANSLATION: !0, - USE_CACHED_POLYGON_IMAGE: !1, - EXPAND_W: 2 - }, at.PROFILE_IOS_DEFAULT = at.PROFILE_IOS_QUALITY, at.PROFILE_ANDROID = { - PROFILE_NAME: "Android", - USE_ADJUST_TRANSLATION: !1, - USE_CACHED_POLYGON_IMAGE: !1, - EXPAND_W: 2 - }, at.PROFILE_DESKTOP = { - PROFILE_NAME: "Desktop", - USE_ADJUST_TRANSLATION: !1, - USE_CACHED_POLYGON_IMAGE: !1, - EXPAND_W: 2 - }, at.initProfile = function() { - Et.isIOS() ? at.setupProfile(at.PROFILE_IOS_DEFAULT) : Et.isAndroid() ? at.setupProfile(at.PROFILE_ANDROID) : at.setupProfile(at.PROFILE_DESKTOP) - }, at.setupProfile = function(t, i) { - if ("number" == typeof t) switch (t) { - case 9901: - t = at.PROFILE_IOS_SPEED; - break; - case 9902: - t = at.PROFILE_IOS_QUALITY; - break; - case 9903: - t = at.PROFILE_IOS_DEFAULT; - break; - case 9904: - t = at.PROFILE_ANDROID; - break; - case 9905: - t = at.PROFILE_DESKTOP; - break; - default: - alert("profile _$6 _$Ui : " + t) - } - arguments.length < 2 && (i = !0), i && console.log("profile : " + t.PROFILE_NAME); - for (var e in t) at[e] = t[e], i && console.log(" [" + e + "] = " + t[e]) - }, at.init = function() { - if (at._$6b) { - console.log("Live2D %s", at._$2s), at._$6b = !1; - !0, at.initProfile() - } - }, at.getVersionStr = function() { - return at._$2s - }, at.getVersionNo = function() { - return at._$Kr - }, at._$sT = function(t) { - at._$cT = t - }, at.getError = function() { - var t = at._$cT; - return at._$cT = 0, t - }, at.dispose = function() { - at.glContext = [], at.frameBuffers = [], at.fTexture = [] - }, at.setGL = function(t, i) { - var e = i || 0; - at.glContext[e] = t - }, at.getGL = function(t) { - return at.glContext[t] - }, at.setClippingMaskBufferSize = function(t) { - at.clippingMaskBufferSize = t - }, at.getClippingMaskBufferSize = function() { - return at.clippingMaskBufferSize - }, at.deleteBuffer = function(t) { - at.getGL(t).deleteFramebuffer(at.frameBuffers[t].framebuffer), delete at.frameBuffers[t], delete at.glContext[t] - }, ht._$r2 = function(t) { - return t < 0 ? 0 : t > 1 ? 1 : .5 - .5 * Math.cos(t * Lt.PI_F) - }, lt._$fr = -1, lt.prototype.toString = function() { - return this._$ib - }, $t.prototype = new W, $t._$42 = 0, $t._$Os = 30, $t._$ms = 0, $t._$ns = 1, $t._$_s = 2, $t._$gT = new Array, $t.prototype._$_S = function(t) { - this._$LP = t - }, $t.prototype.getTextureNo = function() { - return this._$LP - }, $t.prototype._$ZL = function() { - return this._$Qi - }, $t.prototype._$H2 = function() { - return this._$JP - }, $t.prototype.getNumPoints = function() { - return this._$d0 - }, $t.prototype.getType = function() { - return W._$wb - }, $t.prototype._$B2 = function(t, i, e) { - var r = i, - o = null != r._$hr ? 
r._$hr : r._$Cr; - switch (U._$do) { - default: - case U._$Ms: - throw new Error("_$L _$ro "); - case U._$Qs: - for (var n = this._$d0 - 1; n >= 0; --n) o[n * U._$No + 4] = e - } - }, $t.prototype._$zP = function() { - this._$GS = new D, this._$GS._$zP() - }, $t.prototype._$F0 = function(t) { - W.prototype._$F0.call(this, t), this._$LP = t._$6L(), this._$d0 = t._$6L(), this._$Yo = t._$6L(); - var i = t._$nP(); - this._$BP = new Int16Array(3 * this._$Yo); - for (var e = 3 * this._$Yo - 1; e >= 0; --e) this._$BP[e] = i[e]; - if (this._$Eo = t._$nP(), this._$Qi = t._$nP(), t.getFormatVersion() >= G._$s7) { - if (this._$JP = t._$6L(), 0 != this._$JP) { - if (0 != (1 & this._$JP)) { - var r = t._$6L(); - null == this._$5P && (this._$5P = new Object), this._$5P._$Hb = parseInt(r) - } - 0 != (this._$JP & $t._$Os) ? this._$6s = (this._$JP & $t._$Os) >> 1 : this._$6s = $t._$ms, 0 != (32 & this._$JP) && (this.culling = !1) - } - } else this._$JP = 0 - }, $t.prototype.init = function(t) { - var i = new ut(this), - e = this._$d0 * U._$No, - r = this._$32(); - switch (null != i._$Cr && (i._$Cr = null), i._$Cr = new Float32Array(e), null != i._$hr && (i._$hr = null), i._$hr = r ? new Float32Array(e) : null, U._$do) { - default: - case U._$Ms: - if (U._$Ls) for (var o = this._$d0 - 1; o >= 0; --o) { - var n = o << 1; - this._$Qi[n + 1] = 1 - this._$Qi[n + 1] - } - break; - case U._$Qs: - for (var o = this._$d0 - 1; o >= 0; --o) { - var n = o << 1, - s = o * U._$No, - _ = this._$Qi[n], - a = this._$Qi[n + 1]; - i._$Cr[s] = _, i._$Cr[s + 1] = a, i._$Cr[s + 4] = 0, r && (i._$hr[s] = _, i._$hr[s + 1] = a, i._$hr[s + 4] = 0) - } - } - return i - }, $t.prototype._$Nr = function(t, i) { - var e = i; - if (this != e._$GT() && console.log("### assert!! ### "), this._$GS._$Ur(t) && (W.prototype._$Nr.call(this, t, e), !e._$IS[0])) { - var r = $t._$gT; - r[0] = !1, v._$Vr(t, this._$GS, r, this._$d0, this._$Eo, e._$Cr, U._$i2, U._$No) - } - }, $t.prototype._$2b = function(t, i) { - try { - this != i._$GT() && console.log("### assert!! ### "); - var e = !1; - i._$IS[0] && (e = !0); - var r = i; - if (!e && (W.prototype._$2b.call(this, t), this._$32())) { - var o = this.getTargetBaseDataID(); - if (r._$8r == W._$ur && (r._$8r = t.getBaseDataIndex(o)), r._$8r < 0) at._$so && _._$li("_$L _$0P _$G :: %s", o); - else { - var n = t.getBaseData(r._$8r), - s = t._$q2(r._$8r); - null == n || s._$x2() ? r._$AT = !1 : (n._$nb(t, s, r._$Cr, r._$hr, this._$d0, U._$i2, U._$No), r._$AT = !0), r.baseOpacity = s.getTotalOpacity() - } - } - } catch (t) { - throw t - } - }, $t.prototype.draw = function(t, i, e) { - if (this != e._$GT() && console.log("### assert!! ### "), !e._$IS[0]) { - var r = e, - o = this._$LP; - o < 0 && (o = 1); - var n = this.getOpacity(i, r) * e._$VS * e.baseOpacity, - s = null != r._$hr ? r._$hr : r._$Cr; - t.setClipBufPre_clipContextForDraw(e.clipBufPre_clipContext), t._$WP(this.culling), t._$Uo(o, 3 * this._$Yo, this._$BP, s, this._$Qi, n, this._$6s, r) - } - }, $t.prototype.dump = function() { - console.log(" _$yi( %d ) , _$d0( %d ) , _$Yo( %d ) \n", this._$LP, this._$d0, this._$Yo), console.log(" _$Oi _$di = { "); - for (var t = 0; t < this._$BP.length; t++) console.log("%5d ,", this._$BP[t]); - console.log("\n _$5i _$30"); - for (var t = 0; t < this._$Eo.length; t++) { - console.log("\n _$30[%d] = ", t); - for (var i = this._$Eo[t], e = 0; e < i.length; e++) console.log("%6.2f, ", i[e]) - } - console.log("\n") - }, $t.prototype._$72 = function(t) { - return null == this._$5P ? 
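/* $t appears to be the textured-mesh draw data: _$LP texture slot, _$d0 vertex count, _$Yo triangle count with an Int16 index buffer _$BP, UVs in _$Qi, plus a blend mode (_$6s) and a culling flag unpacked from the _$JP bitfield; init builds the interleaved vertex array (stride U._$No) that draw() hands to the renderer's _$Uo: */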
null : this._$5P[t] - }, $t.prototype.getIndexArray = function() { - return this._$BP - }, ut.prototype = new Mt, ut.prototype.getTransformedPoints = function() { - return null != this._$hr ? this._$hr : this._$Cr - }, pt.prototype._$HT = function(t) { - this.x = t.x, this.y = t.y - }, pt.prototype._$HT = function(t, i) { - this.x = t, this.y = i - }, ft.prototype = new i, ft.loadModel = function(t) { - var e = new ft; - return i._$62(e, t), e - }, ft.loadModel = function(t, e) { - var r = e || 0, - o = new ft(r); - return i._$62(o, t), o - }, ft._$to = function() { - return new ft - }, ft._$er = function(t) { - var i = new _$5("../_$_r/_$t0/_$Ri/_$_P._$d"); - if (0 == i.exists()) throw new _$ls("_$t0 _$_ _$6 _$Ui :: " + i._$PL()); - for (var e = ["../_$_r/_$t0/_$Ri/_$_P.512/_$CP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$vP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$EP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$pP._$1"], r = ft.loadModel(i._$3b()), o = 0; o < e.length; o++) { - var n = new _$5(e[o]); - if (0 == n.exists()) throw new _$ls("_$t0 _$_ _$6 _$Ui :: " + n._$PL()); - r.setTexture(o, _$nL._$_o(t, n._$3b())) - } - return r - }, ft.prototype.setGL = function(t) { - at.setGL(t) - }, ft.prototype.setTransform = function(t) { - this.drawParamWebGL.setTransform(t) - }, ft.prototype.update = function() { - this._$5S.update(), this._$5S.preDraw(this.drawParamWebGL) - }, ft.prototype.draw = function() { - this._$5S.draw(this.drawParamWebGL) - }, ft.prototype._$K2 = function() { - this.drawParamWebGL._$K2() - }, ft.prototype.setTexture = function(t, i) { - null == this.drawParamWebGL && _._$li("_$Yi for QT _$ki / _$XS() is _$6 _$ui!!"), this.drawParamWebGL.setTexture(t, i) - }, ft.prototype.setTexture = function(t, i) { - null == this.drawParamWebGL && _._$li("_$Yi for QT _$ki / _$XS() is _$6 _$ui!!"), this.drawParamWebGL.setTexture(t, i) - }, ft.prototype._$Rs = function() { - return this.drawParamWebGL._$Rs() - }, ft.prototype._$Ds = function(t) { - this.drawParamWebGL._$Ds(t) - }, ft.prototype.getDrawParam = function() { - return this.drawParamWebGL - }, ft.prototype.setMatrix = function(t) { - this.drawParamWebGL.setMatrix(t) - }, ft.prototype.setPremultipliedAlpha = function(t) { - this.drawParamWebGL.setPremultipliedAlpha(t) - }, ft.prototype.isPremultipliedAlpha = function() { - return this.drawParamWebGL.isPremultipliedAlpha() - }, ft.prototype.setAnisotropy = function(t) { - this.drawParamWebGL.setAnisotropy(t) - }, ft.prototype.getAnisotropy = function() { - return this.drawParamWebGL.getAnisotropy() - }, ct.prototype._$tb = function() { - return this.motions - }, ct.prototype.startMotion = function(t, i) { - for (var e = null, r = this.motions.length, o = 0; o < r; ++o) null != (e = this.motions[o]) && (e._$qS(e._$w0.getFadeOut()), this._$eb && _._$Ji("MotionQueueManager[size:%2d]->startMotion() / start _$K _$3 (m%d)\n", r, e._$sr)); - if (null == t) return -1; - e = new dt, e._$w0 = t, this.motions.push(e); - var n = e._$sr; - return this._$eb && _._$Ji("MotionQueueManager[size:%2d]->startMotion() / new _$w0 (m%d)\n", r, n), n - }, ct.prototype.updateParam = function(t) { - try { - for (var i = !1, e = 0; e < this.motions.length; e++) { - var r = this.motions[e]; - if (null != r) { - var o = r._$w0; - null != o ? 
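/* ft wraps a model for WebGL, delegating draw/texture/matrix calls to drawParamWebGL (note setTexture and loadModel are each defined twice — the later definition silently wins, a leftover of Java-style overloading). ct is the motion queue manager; updateParam (here) advances every queued motion and prunes finished entries: */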
(o.updateParam(t, r), i = !0, r.isFinished() && (this._$eb && _._$Ji("MotionQueueManager[size:%2d]->updateParam() / _$T0 _$w0 (m%d)\n", this.motions.length - 1, r._$sr), this.motions.splice(e, 1), e--)) : (this.motions = this.motions.splice(e, 1), e--) - } else this.motions.splice(e, 1), e-- - } - return i - } catch (t) { - return _._$li(t), !0 - } - }, ct.prototype.isFinished = function(t) { - if (arguments.length >= 1) { - for (var i = 0; i < this.motions.length; i++) { - var e = this.motions[i]; - if (null != e && (e._$sr == t && !e.isFinished())) return !1 - } - return !0 - } - for (var i = 0; i < this.motions.length; i++) { - var e = this.motions[i]; - if (null != e) { - if (null != e._$w0) { - if (!e.isFinished()) return !1 - } else this.motions.splice(i, 1), i-- - } else this.motions.splice(i, 1), i-- - } - return !0 - }, ct.prototype.stopAllMotions = function() { - for (var t = 0; t < this.motions.length; t++) { - var i = this.motions[t]; - if (null != i) { - i._$w0; - this.motions.splice(t, 1), t-- - } else this.motions.splice(t, 1), t-- - } - }, ct.prototype._$Zr = function(t) { - this._$eb = t - }, ct.prototype._$e = function() { - console.log("-- _$R --\n"); - for (var t = 0; t < this.motions.length; t++) { - var i = this.motions[t], - e = i._$w0; - console.log("MotionQueueEnt[%d] :: %s\n", this.motions.length, e.toString()) - } - }, dt._$Gs = 0, dt.prototype.isFinished = function() { - return this._$9L - }, dt.prototype._$qS = function(t) { - var i = w.getUserTimeMSec(), - e = i + t; - (this._$Do < 0 || e < this._$Do) && (this._$Do = e) - }, dt.prototype._$Bs = function() { - return this._$sr - }, gt.prototype.setContext = function(t) { - var i = this.m; - t.transform(i[0], i[1], i[3], i[4], i[6], i[7]) - }, gt.prototype.toString = function() { - for (var t = "LDTransform { ", i = 0; i < 9; i++) t += this.m[i].toFixed(2) + " ,"; - return t += " }" - }, gt.prototype.identity = function() { - var t = this.m; - t[0] = t[4] = t[8] = 1, t[1] = t[2] = t[3] = t[5] = t[6] = t[7] = 0 - }, gt.prototype._$PS = function(t, i, e) { - null == e && (e = new Array(0, 0)); - var r = this.m; - return e[0] = r[0] * t + r[3] * i + r[6], e[1] = r[1] * t + r[4] * i + r[7], e - }, gt.prototype._$P2 = function(t) { - t || (t = new gt); - var i = this.m, - e = i[0], - r = i[1], - o = i[2], - n = i[3], - s = i[4], - _ = i[5], - a = i[6], - h = i[7], - l = i[8], - $ = e * s * l + r * _ * a + o * n * h - e * _ * h - o * s * a - r * n * l; - if (0 == $) return null; - var u = 1 / $; - return t.m[0] = u * (s * l - h * _), t.m[1] = u * (h * o - r * l), t.m[2] = u * (r * _ - s * o), t.m[3] = u * (a * _ - n * l), t.m[4] = u * (e * l - a * o), t.m[5] = u * (n * o - e * _), t.m[6] = u * (n * h - a * s), t.m[7] = u * (a * r - e * h), t.m[8] = u * (e * s - n * r), t - }, gt.prototype.transform = function(t, i, e) { - null == e && (e = new Array(0, 0)); - var r = this.m; - return e[0] = r[0] * t + r[3] * i + r[6], e[1] = r[1] * t + r[4] * i + r[7], e - }, gt.prototype.translate = function(t, i) { - var e = this.m; - e[6] = e[0] * t + e[3] * i + e[6], e[7] = e[1] * t + e[4] * i + e[7], e[8] = e[2] * t + e[5] * i + e[8] - }, gt.prototype.scale = function(t, i) { - var e = this.m; - e[0] *= t, e[1] *= t, e[2] *= t, e[3] *= i, e[4] *= i, e[5] *= i - }, gt.prototype.shear = function(t, i) { - var e = this.m, - r = e[0] + e[3] * i, - o = e[1] + e[4] * i, - n = e[2] + e[5] * i; - e[3] = e[0] * t + e[3], e[4] = e[1] * t + e[4], e[5] = e[2] * t + e[5], e[0] = r, e[1] = o, e[2] = n - }, gt.prototype.rotate = function(t) { - 
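/* dt is a single queue entry carrying its own fade-out deadline (_$qS). gt is a 3x3 affine matrix kept column-major in m[9]: _$P2 computes the inverse as the adjugate over the determinant, and rotate (here) post-multiplies a rotation: */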
var i = this.m, - e = Math.cos(t), - r = Math.sin(t), - o = i[0] * e + i[3] * r, - n = i[1] * e + i[4] * r, - s = i[2] * e + i[5] * r; - i[3] = -i[0] * r + i[3] * e, i[4] = -i[1] * r + i[4] * e, i[5] = -i[2] * r + i[5] * e, i[0] = o, i[1] = n, i[2] = s - }, gt.prototype.concatenate = function(t) { - var i = this.m, - e = t.m, - r = i[0] * e[0] + i[3] * e[1] + i[6] * e[2], - o = i[1] * e[0] + i[4] * e[1] + i[7] * e[2], - n = i[2] * e[0] + i[5] * e[1] + i[8] * e[2], - s = i[0] * e[3] + i[3] * e[4] + i[6] * e[5], - _ = i[1] * e[3] + i[4] * e[4] + i[7] * e[5], - a = i[2] * e[3] + i[5] * e[4] + i[8] * e[5], - h = i[0] * e[6] + i[3] * e[7] + i[6] * e[8], - l = i[1] * e[6] + i[4] * e[7] + i[7] * e[8], - $ = i[2] * e[6] + i[5] * e[7] + i[8] * e[8]; - m[0] = r, m[1] = o, m[2] = n, m[3] = s, m[4] = _, m[5] = a, m[6] = h, m[7] = l, m[8] = $ - }, yt.prototype = new et, yt._$eT = null, yt._$tP = new Object, yt._$2o = function() { - return null == yt._$eT && (yt._$eT = yt.getID("DST_BASE")), yt._$eT - }, yt._$27 = function() { - yt._$tP.clear(), yt._$eT = null - }, yt.getID = function(t) { - var i = yt._$tP[t]; - return null == i && (i = new yt(t), yt._$tP[t] = i), i - }, yt.prototype._$3s = function() { - return new yt - }, mt.prototype = new E, mt._$9r = function(t) { - return new Float32Array(t) - }, mt._$vb = function(t) { - return new Int16Array(t) - }, mt._$cr = function(t, i) { - return null == t || t._$yL() < i.length ? (t = mt._$9r(2 * i.length), t.put(i), t._$oT(0)) : (t.clear(), t.put(i), t._$oT(0)), t - }, mt._$mb = function(t, i) { - return null == t || t._$yL() < i.length ? (t = mt._$vb(2 * i.length), t.put(i), t._$oT(0)) : (t.clear(), t.put(i), t._$oT(0)), t - }, mt._$Hs = function() { - return this._$Gr - }, mt._$as = function(t) { - this._$Gr = t - }, mt.prototype.getGL = function() { - return this.gl - }, mt.prototype.setGL = function(t) { - this.gl = t - }, mt.prototype.setTransform = function(t) { - this.transform = t - }, mt.prototype._$ZT = function() { - var t = this.gl; - this.firstDraw && (this.initShader(), this.firstDraw = !1, this.anisotropyExt = t.getExtension("EXT_texture_filter_anisotropic") || t.getExtension("WEBKIT_EXT_texture_filter_anisotropic") || t.getExtension("MOZ_EXT_texture_filter_anisotropic"), this.anisotropyExt && (this.maxAnisotropy = t.getParameter(this.anisotropyExt.MAX_TEXTURE_MAX_ANISOTROPY_EXT))), t.disable(t.SCISSOR_TEST), t.disable(t.STENCIL_TEST), t.disable(t.DEPTH_TEST), t.frontFace(t.CW), t.enable(t.BLEND), t.colorMask(1, 1, 1, 1), t.bindBuffer(t.ARRAY_BUFFER, null), t.bindBuffer(t.ELEMENT_ARRAY_BUFFER, null) - }, mt.prototype._$Uo = function(t, i, e, r, o, n, s, _) { - if (!(n < .01 && null == this.clipBufPre_clipContextMask)) { - var a = (n > .9 && at.EXPAND_W, this.gl); - if (null == this.gl) throw new Error("gl is null"); - var h = 1 * this._$C0 * n, - l = 1 * this._$tT * n, - $ = 1 * this._$WL * n, - u = this._$lT * n; - if (null != this.clipBufPre_clipContextMask) { - a.frontFace(a.CCW), a.useProgram(this.shaderProgram), this._$vS = Tt(a, this._$vS, r), this._$no = Pt(a, this._$no, e), a.enableVertexAttribArray(this.a_position_Loc), a.vertexAttribPointer(this.a_position_Loc, 2, a.FLOAT, !1, 0, 0), this._$NT = Tt(a, this._$NT, o), a.activeTexture(a.TEXTURE1), a.bindTexture(a.TEXTURE_2D, this.textures[t]), a.uniform1i(this.s_texture0_Loc, 1), a.enableVertexAttribArray(this.a_texCoord_Loc), a.vertexAttribPointer(this.a_texCoord_Loc, 2, a.FLOAT, !1, 0, 0), a.uniformMatrix4fv(this.u_matrix_Loc, !1, 
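/* Caution: gt.prototype.concatenate above writes its product into a bare identifier m instead of this.m — an apparent upstream bug. mt is the WebGL draw param: _$ZT resets GL state once per frame, and _$Uo draws through one of three shader paths — into the mask buffer (this branch, using matrixForMask), through a mask, or plain: */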
this.getClipBufPre_clipContextMask().matrixForMask); - var p = this.getClipBufPre_clipContextMask().layoutChannelNo, - f = this.getChannelFlagAsColor(p); - a.uniform4f(this.u_channelFlag, f.r, f.g, f.b, f.a); - var c = this.getClipBufPre_clipContextMask().layoutBounds; - a.uniform4f(this.u_baseColor_Loc, 2 * c.x - 1, 2 * c.y - 1, 2 * c._$EL() - 1, 2 * c._$5T() - 1), a.uniform1i(this.u_maskFlag_Loc, !0) - } else if (null != this.getClipBufPre_clipContextDraw()) { - a.useProgram(this.shaderProgramOff), this._$vS = Tt(a, this._$vS, r), this._$no = Pt(a, this._$no, e), a.enableVertexAttribArray(this.a_position_Loc_Off), a.vertexAttribPointer(this.a_position_Loc_Off, 2, a.FLOAT, !1, 0, 0), this._$NT = Tt(a, this._$NT, o), a.activeTexture(a.TEXTURE1), a.bindTexture(a.TEXTURE_2D, this.textures[t]), a.uniform1i(this.s_texture0_Loc_Off, 1), a.enableVertexAttribArray(this.a_texCoord_Loc_Off), a.vertexAttribPointer(this.a_texCoord_Loc_Off, 2, a.FLOAT, !1, 0, 0), a.uniformMatrix4fv(this.u_clipMatrix_Loc_Off, !1, this.getClipBufPre_clipContextDraw().matrixForDraw), a.uniformMatrix4fv(this.u_matrix_Loc_Off, !1, this.matrix4x4), a.activeTexture(a.TEXTURE2), a.bindTexture(a.TEXTURE_2D, at.fTexture[this.glno]), a.uniform1i(this.s_texture1_Loc_Off, 2); - var p = this.getClipBufPre_clipContextDraw().layoutChannelNo, - f = this.getChannelFlagAsColor(p); - a.uniform4f(this.u_channelFlag_Loc_Off, f.r, f.g, f.b, f.a), a.uniform4f(this.u_baseColor_Loc_Off, h, l, $, u) - } else a.useProgram(this.shaderProgram), this._$vS = Tt(a, this._$vS, r), this._$no = Pt(a, this._$no, e), a.enableVertexAttribArray(this.a_position_Loc), a.vertexAttribPointer(this.a_position_Loc, 2, a.FLOAT, !1, 0, 0), this._$NT = Tt(a, this._$NT, o), a.activeTexture(a.TEXTURE1), a.bindTexture(a.TEXTURE_2D, this.textures[t]), a.uniform1i(this.s_texture0_Loc, 1), a.enableVertexAttribArray(this.a_texCoord_Loc), a.vertexAttribPointer(this.a_texCoord_Loc, 2, a.FLOAT, !1, 0, 0), a.uniformMatrix4fv(this.u_matrix_Loc, !1, this.matrix4x4), a.uniform4f(this.u_baseColor_Loc, h, l, $, u), a.uniform1i(this.u_maskFlag_Loc, !1); - this.culling ? 
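/* The mask pipeline packs each clip context into one color channel (u_channelFlag) and one sub-rectangle (layoutBounds) of a shared mask framebuffer; the masked-draw branch above then samples that texture as s_texture1 with the per-draw clip matrix. Face culling and blend state follow: */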
this.gl.enable(a.CULL_FACE) : this.gl.disable(a.CULL_FACE), this.gl.enable(a.BLEND); - var d, g, y, m; - if (null != this.clipBufPre_clipContextMask) d = a.ONE, g = a.ONE_MINUS_SRC_ALPHA, y = a.ONE, m = a.ONE_MINUS_SRC_ALPHA; - else switch (s) { - case $t._$ms: - d = a.ONE, g = a.ONE_MINUS_SRC_ALPHA, y = a.ONE, m = a.ONE_MINUS_SRC_ALPHA; - break; - case $t._$ns: - d = a.ONE, g = a.ONE, y = a.ZERO, m = a.ONE; - break; - case $t._$_s: - d = a.DST_COLOR, g = a.ONE_MINUS_SRC_ALPHA, y = a.ZERO, m = a.ONE - } - a.blendEquationSeparate(a.FUNC_ADD, a.FUNC_ADD), a.blendFuncSeparate(d, g, y, m), this.anisotropyExt && a.texParameteri(a.TEXTURE_2D, this.anisotropyExt.TEXTURE_MAX_ANISOTROPY_EXT, this.maxAnisotropy); - var T = e.length; - a.drawElements(a.TRIANGLES, T, a.UNSIGNED_SHORT, 0), a.bindTexture(a.TEXTURE_2D, null) - } - }, mt.prototype._$Rs = function() { - throw new Error("_$Rs") - }, mt.prototype._$Ds = function(t) { - throw new Error("_$Ds") - }, mt.prototype._$K2 = function() { - for (var t = 0; t < this.textures.length; t++) { - 0 != this.textures[t] && (this.gl._$K2(1, this.textures, t), this.textures[t] = null) - } - }, mt.prototype.setTexture = function(t, i) { - this.textures[t] = i - }, mt.prototype.initShader = function() { - var t = this.gl; - this.loadShaders2(), this.a_position_Loc = t.getAttribLocation(this.shaderProgram, "a_position"), this.a_texCoord_Loc = t.getAttribLocation(this.shaderProgram, "a_texCoord"), this.u_matrix_Loc = t.getUniformLocation(this.shaderProgram, "u_mvpMatrix"), this.s_texture0_Loc = t.getUniformLocation(this.shaderProgram, "s_texture0"), this.u_channelFlag = t.getUniformLocation(this.shaderProgram, "u_channelFlag"), this.u_baseColor_Loc = t.getUniformLocation(this.shaderProgram, "u_baseColor"), this.u_maskFlag_Loc = t.getUniformLocation(this.shaderProgram, "u_maskFlag"), this.a_position_Loc_Off = t.getAttribLocation(this.shaderProgramOff, "a_position"), this.a_texCoord_Loc_Off = t.getAttribLocation(this.shaderProgramOff, "a_texCoord"), this.u_matrix_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "u_mvpMatrix"), this.u_clipMatrix_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "u_ClipMatrix"), this.s_texture0_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "s_texture0"), this.s_texture1_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "s_texture1"), this.u_channelFlag_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "u_channelFlag"), this.u_baseColor_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "u_baseColor") - }, mt.prototype.disposeShader = function() { - var t = this.gl; - this.shaderProgram && (t.deleteProgram(this.shaderProgram), this.shaderProgram = null), this.shaderProgramOff && (t.deleteProgram(this.shaderProgramOff), this.shaderProgramOff = null) - }, mt.prototype.compileShader = function(t, i) { - var e = this.gl, - r = i, - o = e.createShader(t); - if (null == o) return _._$Ji("_$L0 to create shader"), null; - if (e.shaderSource(o, r), e.compileShader(o), !e.getShaderParameter(o, e.COMPILE_STATUS)) { - var n = e.getShaderInfoLog(o); - return _._$Ji("_$L0 to compile shader : " + n), e.deleteShader(o), null - } - return o - }, mt.prototype.loadShaders2 = function() { - var t = this.gl; - if (this.shaderProgram = t.createProgram(), !this.shaderProgram) return !1; - if (this.shaderProgramOff = t.createProgram(), !this.shaderProgramOff) return !1; - if (this.vertShader = this.compileShader(t.VERTEX_SHADER, "attribute vec4 a_position;attribute vec2 a_texCoord;varying vec2 v_texCoord;varying vec4 
v_ClipPos;uniform mat4 u_mvpMatrix;void main(){ gl_Position = u_mvpMatrix * a_position; v_ClipPos = u_mvpMatrix * a_position; v_texCoord = a_texCoord;}"), !this.vertShader) return _._$Ji("Vertex shader compile _$li!"), !1; - if (this.vertShaderOff = this.compileShader(t.VERTEX_SHADER, "attribute vec4 a_position;attribute vec2 a_texCoord;varying vec2 v_texCoord;varying vec4 v_ClipPos;uniform mat4 u_mvpMatrix;uniform mat4 u_ClipMatrix;void main(){ gl_Position = u_mvpMatrix * a_position; v_ClipPos = u_ClipMatrix * a_position; v_texCoord = a_texCoord ;}"), !this.vertShaderOff) return _._$Ji("OffVertex shader compile _$li!"), !1; - if (this.fragShader = this.compileShader(t.FRAGMENT_SHADER, "precision mediump float;varying vec2 v_texCoord;varying vec4 v_ClipPos;uniform sampler2D s_texture0;uniform vec4 u_channelFlag;uniform vec4 u_baseColor;uniform bool u_maskFlag;void main(){ vec4 smpColor; if(u_maskFlag){ float isInside = step(u_baseColor.x, v_ClipPos.x/v_ClipPos.w) * step(u_baseColor.y, v_ClipPos.y/v_ClipPos.w) * step(v_ClipPos.x/v_ClipPos.w, u_baseColor.z) * step(v_ClipPos.y/v_ClipPos.w, u_baseColor.w); smpColor = u_channelFlag * texture2D(s_texture0 , v_texCoord).a * isInside; }else{ smpColor = texture2D(s_texture0 , v_texCoord) * u_baseColor; } gl_FragColor = smpColor;}"), !this.fragShader) return _._$Ji("Fragment shader compile _$li!"), !1; - if (this.fragShaderOff = this.compileShader(t.FRAGMENT_SHADER, "precision mediump float ;varying vec2 v_texCoord;varying vec4 v_ClipPos;uniform sampler2D s_texture0;uniform sampler2D s_texture1;uniform vec4 u_channelFlag;uniform vec4 u_baseColor ;void main(){ vec4 col_formask = texture2D(s_texture0, v_texCoord) * u_baseColor; vec4 clipMask = texture2D(s_texture1, v_ClipPos.xy / v_ClipPos.w) * u_channelFlag; float maskVal = clipMask.r + clipMask.g + clipMask.b + clipMask.a; col_formask = col_formask * maskVal; gl_FragColor = col_formask;}"), !this.fragShaderOff) return _._$Ji("OffFragment shader compile _$li!"), !1; - if (t.attachShader(this.shaderProgram, this.vertShader), t.attachShader(this.shaderProgram, this.fragShader), t.attachShader(this.shaderProgramOff, this.vertShaderOff), t.attachShader(this.shaderProgramOff, this.fragShaderOff), t.linkProgram(this.shaderProgram), t.linkProgram(this.shaderProgramOff), !t.getProgramParameter(this.shaderProgram, t.LINK_STATUS)) { - var i = t.getProgramInfoLog(this.shaderProgram); - return _._$Ji("_$L0 to link program: " + i), this.vertShader && (t.deleteShader(this.vertShader), this.vertShader = 0), this.fragShader && (t.deleteShader(this.fragShader), this.fragShader = 0), this.shaderProgram && (t.deleteProgram(this.shaderProgram), this.shaderProgram = 0), this.vertShaderOff && (t.deleteShader(this.vertShaderOff), this.vertShaderOff = 0), this.fragShaderOff && (t.deleteShader(this.fragShaderOff), this.fragShaderOff = 0), this.shaderProgramOff && (t.deleteProgram(this.shaderProgramOff), this.shaderProgramOff = 0), !1 - } - return !0 - }, mt.prototype.createFramebuffer = function() { - var t = this.gl, - i = at.clippingMaskBufferSize, - e = t.createFramebuffer(); - t.bindFramebuffer(t.FRAMEBUFFER, e); - var r = t.createRenderbuffer(); - t.bindRenderbuffer(t.RENDERBUFFER, r), t.renderbufferStorage(t.RENDERBUFFER, t.RGBA4, i, i), t.framebufferRenderbuffer(t.FRAMEBUFFER, t.COLOR_ATTACHMENT0, t.RENDERBUFFER, r); - var o = t.createTexture(); - return t.bindTexture(t.TEXTURE_2D, o), t.texImage2D(t.TEXTURE_2D, 0, t.RGBA, i, i, 0, t.RGBA, t.UNSIGNED_BYTE, null), t.texParameteri(t.TEXTURE_2D, t.TEXTURE_MIN_FILTER, 
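/* The shader pairs above implement that scheme: the mask fragment shader writes u_channelFlag * texture alpha only inside u_baseColor's bounds (via step()), and the masked shader multiplies the drawn color by the sum of the sampled mask channels. createFramebuffer allocates the square clippingMaskBufferSize RGBA target those masks render into, finishing its filter/wrap setup here: */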
t.LINEAR), t.texParameteri(t.TEXTURE_2D, t.TEXTURE_MAG_FILTER, t.LINEAR), t.texParameteri(t.TEXTURE_2D, t.TEXTURE_WRAP_S, t.CLAMP_TO_EDGE), t.texParameteri(t.TEXTURE_2D, t.TEXTURE_WRAP_T, t.CLAMP_TO_EDGE), t.framebufferTexture2D(t.FRAMEBUFFER, t.COLOR_ATTACHMENT0, t.TEXTURE_2D, o, 0), t.bindTexture(t.TEXTURE_2D, null), t.bindRenderbuffer(t.RENDERBUFFER, null), t.bindFramebuffer(t.FRAMEBUFFER, null), at.fTexture[this.glno] = o, { - framebuffer: e, - renderbuffer: r, - texture: at.fTexture[this.glno] - } - }, St.prototype._$fP = function() { - var t, i, e, r = this._$ST(); - if (0 == (128 & r)) return 255 & r; - if (0 == (128 & (t = this._$ST()))) return (127 & r) << 7 | 127 & t; - if (0 == (128 & (i = this._$ST()))) return (127 & r) << 14 | (127 & t) << 7 | 255 & i; - if (0 == (128 & (e = this._$ST()))) return (127 & r) << 21 | (127 & t) << 14 | (127 & i) << 7 | 255 & e; - throw new lt("_$L _$0P _") - }, St.prototype.getFormatVersion = function() { - return this._$S2 - }, St.prototype._$gr = function(t) { - this._$S2 = t - }, St.prototype._$3L = function() { - return this._$fP() - }, St.prototype._$mP = function() { - return this._$zT(), this._$F += 8, this._$T.getFloat64(this._$F - 8) - }, St.prototype._$_T = function() { - return this._$zT(), this._$F += 4, this._$T.getFloat32(this._$F - 4) - }, St.prototype._$6L = function() { - return this._$zT(), this._$F += 4, this._$T.getInt32(this._$F - 4) - }, St.prototype._$ST = function() { - return this._$zT(), this._$T.getInt8(this._$F++) - }, St.prototype._$9T = function() { - return this._$zT(), this._$F += 2, this._$T.getInt16(this._$F - 2) - }, St.prototype._$2T = function() { - throw this._$zT(), this._$F += 8, new lt("_$L _$q read long") - }, St.prototype._$po = function() { - return this._$zT(), 0 != this._$T.getInt8(this._$F++) - }; - var xt = !0; - St.prototype._$bT = function() { - this._$zT(); - var t = this._$3L(), - i = null; - if (xt) try { - var e = new ArrayBuffer(2 * t); - i = new Uint16Array(e); - for (var r = 0; r < t; ++r) i[r] = this._$T.getUint8(this._$F++); - return String.fromCharCode.apply(null, i) - } catch (t) { - xt = !1 - } - try { - var o = new Array; - if (null == i) for (var r = 0; r < t; ++r) o[r] = this._$T.getUint8(this._$F++); - else for (var r = 0; r < t; ++r) o[r] = i[r]; - return String.fromCharCode.apply(null, o) - } catch (t) { - console.log("read utf8 / _$rT _$L0 !! 
: " + t) - } - }, St.prototype._$cS = function() { - this._$zT(); - for (var t = this._$3L(), i = new Int32Array(t), e = 0; e < t; e++) i[e] = this._$T.getInt32(this._$F), this._$F += 4; - return i - }, St.prototype._$Tb = function() { - this._$zT(); - for (var t = this._$3L(), i = new Float32Array(t), e = 0; e < t; e++) i[e] = this._$T.getFloat32(this._$F), this._$F += 4; - return i - }, St.prototype._$5b = function() { - this._$zT(); - for (var t = this._$3L(), i = new Float64Array(t), e = 0; e < t; e++) i[e] = this._$T.getFloat64(this._$F), this._$F += 8; - return i - }, St.prototype._$nP = function() { - return this._$Jb(-1) - }, St.prototype._$Jb = function(t) { - if (this._$zT(), t < 0 && (t = this._$3L()), t == G._$7P) { - var i = this._$6L(); - if (0 <= i && i < this._$Ko.length) return this._$Ko[i]; - throw new lt("_$sL _$4i @_$m0") - } - var e = this._$4b(t); - return this._$Ko.push(e), e - }, St.prototype._$4b = function(t) { - if (0 == t) return null; - if (50 == t) { - var i = this._$bT(), - e = b.getID(i); - return e - } - if (51 == t) { - var i = this._$bT(), - e = yt.getID(i); - return e - } - if (134 == t) { - var i = this._$bT(), - e = l.getID(i); - return e - } - if (60 == t) { - var i = this._$bT(), - e = u.getID(i); - return e - } - if (t >= 48) { - var r = G._$9o(t); - return null != r ? (r._$F0(this), r) : null - } - switch (t) { - case 1: - return this._$bT(); - case 10: - return new n(this._$6L(), !0); - case 11: - return new S(this._$mP(), this._$mP(), this._$mP(), this._$mP()); - case 12: - return new S(this._$_T(), this._$_T(), this._$_T(), this._$_T()); - case 13: - return new L(this._$mP(), this._$mP()); - case 14: - return new L(this._$_T(), this._$_T()); - case 15: - for (var o = this._$3L(), e = new Array(o), s = 0; s < o; s++) e[s] = this._$nP(); - return e; - case 17: - var e = new F(this._$mP(), this._$mP(), this._$mP(), this._$mP(), this._$mP(), this._$mP()); - return e; - case 21: - return new h(this._$6L(), this._$6L(), this._$6L(), this._$6L()); - case 22: - return new pt(this._$6L(), this._$6L()); - case 23: - throw new Error("_$L _$ro "); - case 16: - case 25: - return this._$cS(); - case 26: - return this._$5b(); - case 27: - return this._$Tb(); - case 2: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - case 9: - case 18: - case 19: - case 20: - case 24: - case 28: - throw new lt("_$6 _$q : _$nP() of 2-9 ,18,19,20,24,28 : " + t); - default: - throw new lt("_$6 _$q : _$nP() NO _$i : " + t) - } - }, St.prototype._$8L = function() { - return 0 == this._$hL ? 
this._$v0 = this._$ST() : 8 == this._$hL && (this._$v0 = this._$ST(), this._$hL = 0), 1 == (this._$v0 >> 7 - this._$hL++ & 1) - }, St.prototype._$zT = function() { - 0 != this._$hL && (this._$hL = 0) - }, vt.prototype._$wP = function(t, i, e) { - for (var r = 0; r < e; r++) { - for (var o = 0; o < i; o++) { - var n = 2 * (o + r * i); - console.log("(% 7.3f , % 7.3f) , ", t[n], t[n + 1]) - } - console.log("\n") - } - console.log("\n") - }, Lt._$2S = Math.PI / 180, Lt._$bS = Math.PI / 180, Lt._$wS = 180 / Math.PI, Lt._$NS = 180 / Math.PI, Lt.PI_F = Math.PI, Lt._$kT = [0, .012368, .024734, .037097, .049454, .061803, .074143, .086471, .098786, .111087, .12337, .135634, .147877, .160098, .172295, .184465, .196606, .208718, .220798, .232844, .244854, .256827, .268761, .280654, .292503, .304308, .316066, .327776, .339436, .351044, .362598, .374097, .385538, .396921, .408243, .419502, .430697, .441826, .452888, .463881, .474802, .485651, .496425, .507124, .517745, .528287, .538748, .549126, .559421, .56963, .579752, .589785, .599728, .609579, .619337, .629, .638567, .648036, .657406, .666676, .675843, .684908, .693867, .70272, .711466, .720103, .72863, .737045, .745348, .753536, .76161, .769566, .777405, .785125, .792725, .800204, .807561, .814793, .821901, .828884, .835739, .842467, .849066, .855535, .861873, .868079, .874153, .880093, .885898, .891567, .897101, .902497, .907754, .912873, .917853, .922692, .92739, .931946, .936359, .940629, .944755, .948737, .952574, .956265, .959809, .963207, .966457, .96956, .972514, .97532, .977976, .980482, .982839, .985045, .987101, .989006, .990759, .992361, .993811, .995109, .996254, .997248, .998088, .998776, .999312, .999694, .999924, 1], Lt._$92 = function(t, i) { - var e = Math.atan2(t[1], t[0]), - r = Math.atan2(i[1], i[0]); - return Lt._$tS(e, r) - }, Lt._$tS = function(t, i) { - for (var e = t - i; e < -Math.PI;) e += 2 * Math.PI; - for (; e > Math.PI;) e -= 2 * Math.PI; - return e - }, Lt._$9 = function(t) { - return Math.sin(t) - }, Lt.fcos = function(t) { - return Math.cos(t) - }, Mt.prototype._$u2 = function() { - return this._$IS[0] - }, Mt.prototype._$yo = function() { - return this._$AT && !this._$IS[0] - }, Mt.prototype._$GT = function() { - return this._$e0 - }, Et._$W2 = 0, Et.SYSTEM_INFO = null, Et.USER_AGENT = navigator.userAgent, Et.isIPhone = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO._isIPhone - }, Et.isIOS = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO._isIPhone || Et.SYSTEM_INFO._isIPad - }, Et.isAndroid = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO._isAndroid - }, Et.getOSVersion = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO.version - }, Et.getOS = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO._isIPhone || Et.SYSTEM_INFO._isIPad ? "iOS" : Et.SYSTEM_INFO._isAndroid ? 
"Android" : "_$Q0 OS" - }, Et.setup = function() { - function t(t, i) { - for (var e = t.substring(i).split(/[ _,;\.]/), r = 0, o = 0; o <= 2 && !isNaN(e[o]); o++) { - var n = parseInt(e[o]); - if (n < 0 || n > 999) { - _._$li("err : " + n + " @UtHtml5.setup()"), r = 0; - break - } - r += n * Math.pow(1e3, 2 - o) - } - return r - } - var i, e = Et.USER_AGENT, - r = Et.SYSTEM_INFO = { - userAgent: e - }; - if ((i = e.indexOf("iPhone OS ")) >= 0) r.os = "iPhone", r._isIPhone = !0, r.version = t(e, i + "iPhone OS ".length); - else if ((i = e.indexOf("iPad")) >= 0) { - if ((i = e.indexOf("CPU OS")) < 0) return void _._$li(" err : " + e + " @UtHtml5.setup()"); - r.os = "iPad", r._isIPad = !0, r.version = t(e, i + "CPU OS ".length) - } else(i = e.indexOf("Android")) >= 0 ? (r.os = "Android", r._isAndroid = !0, r.version = t(e, i + "Android ".length)) : (r.os = "-", r.version = -1) - }, window.UtSystem = w, window.UtDebug = _, window.LDTransform = gt, window.LDGL = nt, window.Live2D = at, window.Live2DModelWebGL = ft, window.Live2DModelJS = q, window.Live2DMotion = J, window.MotionQueueManager = ct, window.PhysicsHair = f, window.AMotion = s, window.PartsDataID = l, window.DrawDataID = b, window.BaseDataID = yt, window.ParamID = u, at.init(); - var At = !1 - }() - }).call(i, e(7)) -}, function(t, i) { - t.exports = { - import: function() { - throw new Error("System.import cannot be used indirectly") - } - } -}, function(t, i, e) { - "use strict"; - - function r(t) { - return t && t.__esModule ? t : { - default: - t - } - } - function o() { - this.models = [], this.count = -1, this.reloadFlg = !1, Live2D.init(), n.Live2DFramework.setPlatformManager(new _. - default) - } - Object.defineProperty(i, "__esModule", { - value: !0 - }), i. -default = o; - var n = e(0), - s = e(9), - _ = r(s), - a = e(10), - h = r(a), - l = e(1), - $ = r(l); - o.prototype.createModel = function() { - var t = new h. - default; - return this.models.push(t), t - }, o.prototype.changeModel = function(t, i) { - if (this.reloadFlg) { - this.reloadFlg = !1; - this.releaseModel(0, t), this.createModel(), this.models[0].load(t, i) - } - }, o.prototype.getModel = function(t) { - return t >= this.models.length ? null : this.models[t] - }, o.prototype.releaseModel = function(t, i) { - this.models.length <= t || (this.models[t].release(i), delete this.models[t], this.models.splice(t, 1)) - }, o.prototype.numModels = function() { - return this.models.length - }, o.prototype.setDrag = function(t, i) { - for (var e = 0; e < this.models.length; e++) this.models[e].setDrag(t, i) - }, o.prototype.maxScaleEvent = function() { - $. - default.DEBUG_LOG && console.log("Max scale event."); - for (var t = 0; t < this.models.length; t++) this.models[t].startRandomMotion($. - default.MOTION_GROUP_PINCH_IN, $. - default.PRIORITY_NORMAL) - }, o.prototype.minScaleEvent = function() { - $. - default.DEBUG_LOG && console.log("Min scale event."); - for (var t = 0; t < this.models.length; t++) this.models[t].startRandomMotion($. - default.MOTION_GROUP_PINCH_OUT, $. - default.PRIORITY_NORMAL) - }, o.prototype.tapEvent = function(t, i) { - $. - default.DEBUG_LOG && console.log("tapEvent view x:" + t + " y:" + i); - for (var e = 0; e < this.models.length; e++) this.models[e].hitTest($. - default.HIT_AREA_HEAD, t, i) ? ($. - default.DEBUG_LOG && console.log("Tap face."), this.models[e].setRandomExpression()): - this.models[e].hitTest($. - default.HIT_AREA_BODY, t, i) ? ($. - default.DEBUG_LOG && console.log("Tap body. 
models[" + e + "]"), this.models[e].startRandomMotion($. - default.MOTION_GROUP_TAP_BODY, $. - default.PRIORITY_NORMAL)) : this.models[e].hitTestCustom("head", t, i) ? ($. - default.DEBUG_LOG && console.log("Tap face."), this.models[e].startRandomMotion($. - default.MOTION_GROUP_FLICK_HEAD, $. - default.PRIORITY_NORMAL)) : this.models[e].hitTestCustom("body", t, i) && ($. - default.DEBUG_LOG && console.log("Tap body. models[" + e + "]"), this.models[e].startRandomMotion($. - default.MOTION_GROUP_TAP_BODY, $. - default.PRIORITY_NORMAL)); - return !0 - } -}, function(t, i, e) { - "use strict"; - - function r() {} - Object.defineProperty(i, "__esModule", { - value: !0 - }), i. -default = r; - var o = e(2); - var requestCache = {}; - r.prototype.loadBytes = function(t, i) { - // Cache 相同的请求,减少请求数量 - if (requestCache[t] !== undefined) { - i(requestCache[t]); - return; - } - var e = new XMLHttpRequest; - e.open("GET", t, !0), e.responseType = "arraybuffer", e.onload = function() { - switch (e.status) { - case 200: - requestCache[t] = e.response; - i(e.response); - break; - default: - console.error("Failed to load (" + e.status + ") : " + t) - } - }, e.send(null) - }, r.prototype.loadString = function(t) { - this.loadBytes(t, function(t) { - return t - }) - }, r.prototype.loadLive2DModel = function(t, i) { - var e = null; - this.loadBytes(t, function(t) { - e = Live2DModelWebGL.loadModel(t), i(e) - }) - }, r.prototype.loadTexture = function(t, i, e, r) { - var n = new Image; - n.crossOrigin = "Anonymous", n.src = e; - n.onload = function() { - var e = (0, o.getContext)(), - s = e.createTexture(); - if (!s) return console.error("Failed to generate gl texture name."), -1; - 0 == t.isPremultipliedAlpha() && e.pixelStorei(e.UNPACK_PREMULTIPLY_ALPHA_WEBGL, 1), e.pixelStorei(e.UNPACK_FLIP_Y_WEBGL, 1), e.activeTexture(e.TEXTURE0), e.bindTexture(e.TEXTURE_2D, s), e.texImage2D(e.TEXTURE_2D, 0, e.RGBA, e.RGBA, e.UNSIGNED_BYTE, n), e.texParameteri(e.TEXTURE_2D, e.TEXTURE_MAG_FILTER, e.LINEAR), e.texParameteri(e.TEXTURE_2D, e.TEXTURE_MIN_FILTER, e.LINEAR_MIPMAP_NEAREST), e.generateMipmap(e.TEXTURE_2D), t.setTexture(i, s), s = null, "function" == typeof r && r() - }, n.onerror = function() { - console.error("Failed to load image : " + e) - } - }, r.prototype.jsonParseFromBytes = function(t) { - var i, e = new Uint8Array(t, 0, 3); - return i = 239 == e[0] && 187 == e[1] && 191 == e[2] ? String.fromCharCode.apply(null, new Uint8Array(t, 3)) : String.fromCharCode.apply(null, new Uint8Array(t)), JSON.parse(i) - }, r.prototype.log = function(t) {} -}, function(t, i, e) { - "use strict"; - - function r(t) { - return t && t.__esModule ? t : { - default: - t - } - } - function o() { - n.L2DBaseModel.prototype.constructor.call(this), this.modelHomeDir = "", this.modelSetting = null, this.tmpMatrix = [] - } - Object.defineProperty(i, "__esModule", { - value: !0 - }), i. -default = o; - var n = e(0), - s = e(11), - _ = r(s), - a = e(1), - h = r(a), - l = e(3), - $ = r(l); - o.prototype = new n.L2DBaseModel, o.prototype.load = function(t, i, e) { - this.setUpdating(!0), this.setInitialized(!1), this.modelHomeDir = i.substring(0, i.lastIndexOf("/") + 1), this.modelSetting = new _. 
- default; - var r = this; - this.modelSetting.loadModelSetting(i, function() { - var t = r.modelHomeDir + r.modelSetting.getModelFile(); - r.loadModelData(t, function(t) { - for (var i = 0; i < r.modelSetting.getTextureNum(); i++) { - if (/^https?:\/\/|^\/\//i.test(r.modelSetting.getTextureFile(i))) var o = r.modelSetting.getTextureFile(i); - else var o = r.modelHomeDir + r.modelSetting.getTextureFile(i); - r.loadTexture(i, o, function() { - if (r.isTexLoaded) { - if (r.modelSetting.getExpressionNum() > 0) { - r.expressions = {}; - for (var t = 0; t < r.modelSetting.getExpressionNum(); t++) { - var i = r.modelSetting.getExpressionName(t), - o = r.modelHomeDir + r.modelSetting.getExpressionFile(t); - r.loadExpression(i, o) - } - } else r.expressionManager = null, r.expressions = {}; - if (r.eyeBlink, null != r.modelSetting.getPhysicsFile() ? r.loadPhysics(r.modelHomeDir + r.modelSetting.getPhysicsFile()) : r.physics = null, null != r.modelSetting.getPoseFile() ? r.loadPose(r.modelHomeDir + r.modelSetting.getPoseFile(), function() { - r.pose.updateParam(r.live2DModel) - }) : r.pose = null, null != r.modelSetting.getLayout()) { - var n = r.modelSetting.getLayout(); - null != n.width && r.modelMatrix.setWidth(n.width), null != n.height && r.modelMatrix.setHeight(n.height), null != n.x && r.modelMatrix.setX(n.x), null != n.y && r.modelMatrix.setY(n.y), null != n.center_x && r.modelMatrix.centerX(n.center_x), null != n.center_y && r.modelMatrix.centerY(n.center_y), null != n.top && r.modelMatrix.top(n.top), null != n.bottom && r.modelMatrix.bottom(n.bottom), null != n.left && r.modelMatrix.left(n.left), null != n.right && r.modelMatrix.right(n.right) - } - if (null != r.modelSetting.getHitAreasCustom()) { - var s = r.modelSetting.getHitAreasCustom(); - null != s.head_x && (h. - default.hit_areas_custom_head_x = s.head_x), null != s.head_y && (h. - default.hit_areas_custom_head_y = s.head_y), null != s.body_x && (h. - default.hit_areas_custom_body_x = s.body_x), null != s.body_y && (h. - default.hit_areas_custom_body_y = s.body_y) - } - for (var t = 0; t < r.modelSetting.getInitParamNum(); t++) r.live2DModel.setParamFloat(r.modelSetting.getInitParamID(t), r.modelSetting.getInitParamValue(t)); - for (var t = 0; t < r.modelSetting.getInitPartsVisibleNum(); t++) r.live2DModel.setPartsOpacity(r.modelSetting.getInitPartsVisibleID(t), r.modelSetting.getInitPartsVisibleValue(t)); - r.live2DModel.saveParam(), r.preloadMotionGroup(h. - default.MOTION_GROUP_IDLE), r.preloadMotionGroup(h. - default.MOTION_GROUP_SLEEPY), r.mainMotionManager.stopAllMotions(), r.setUpdating(!1), r.setInitialized(!0), "function" == typeof e && e() - } - }) - } - }) - }) - }, o.prototype.release = function(t) { - var i = n.Live2DFramework.getPlatformManager(); - t.deleteTexture(i.texture) - }, o.prototype.preloadMotionGroup = function(t) { - for (var i = this, e = 0; e < this.modelSetting.getMotionNum(t); e++) { - var r = this.modelSetting.getMotionFile(t, e); - this.loadMotion(r, this.modelHomeDir + r, function(r) { - r.setFadeIn(i.modelSetting.getMotionFadeIn(t, e)), r.setFadeOut(i.modelSetting.getMotionFadeOut(t, e)) - }) - } - }, o.prototype.update = function() { - if (null == this.live2DModel) return void(h. - default.DEBUG_LOG && console.error("Failed to update.")); - var t = UtSystem.getUserTimeMSec() - this.startTimeMSec, - i = t / 1e3, - e = 2 * i * Math.PI; - if (this.mainMotionManager.isFinished()) { - "1" === sessionStorage.getItem("Sleepy") ? this.startRandomMotion(h. - default.MOTION_GROUP_SLEEPY, h. 
- default.PRIORITY_SLEEPY) : this.startRandomMotion(h. - default.MOTION_GROUP_IDLE, h. - default.PRIORITY_IDLE) - } - this.live2DModel.loadParam(), this.mainMotionManager.updateParam(this.live2DModel) || null != this.eyeBlink && this.eyeBlink.updateParam(this.live2DModel), this.live2DModel.saveParam(), null == this.expressionManager || null == this.expressions || this.expressionManager.isFinished() || this.expressionManager.updateParam(this.live2DModel), this.live2DModel.addToParamFloat("PARAM_ANGLE_X", 30 * this.dragX, 1), this.live2DModel.addToParamFloat("PARAM_ANGLE_Y", 30 * this.dragY, 1), this.live2DModel.addToParamFloat("PARAM_ANGLE_Z", this.dragX * this.dragY * -30, 1), this.live2DModel.addToParamFloat("PARAM_BODY_ANGLE_X", 10 * this.dragX, 1), this.live2DModel.addToParamFloat("PARAM_EYE_BALL_X", this.dragX, 1), this.live2DModel.addToParamFloat("PARAM_EYE_BALL_Y", this.dragY, 1), this.live2DModel.addToParamFloat("PARAM_ANGLE_X", Number(15 * Math.sin(e / 6.5345)), .5), this.live2DModel.addToParamFloat("PARAM_ANGLE_Y", Number(8 * Math.sin(e / 3.5345)), .5), this.live2DModel.addToParamFloat("PARAM_ANGLE_Z", Number(10 * Math.sin(e / 5.5345)), .5), this.live2DModel.addToParamFloat("PARAM_BODY_ANGLE_X", Number(4 * Math.sin(e / 15.5345)), .5), this.live2DModel.setParamFloat("PARAM_BREATH", Number(.5 + .5 * Math.sin(e / 3.2345)), 1), null != this.physics && this.physics.updateParam(this.live2DModel), null == this.lipSync && this.live2DModel.setParamFloat("PARAM_MOUTH_OPEN_Y", this.lipSyncValue), null != this.pose && this.pose.updateParam(this.live2DModel), this.live2DModel.update() - }, o.prototype.setRandomExpression = function() { - var t = []; - for (var i in this.expressions) t.push(i); - var e = parseInt(Math.random() * t.length); - this.setExpression(t[e]) - }, o.prototype.startRandomMotion = function(t, i) { - var e = this.modelSetting.getMotionNum(t), - r = parseInt(Math.random() * e); - this.startMotion(t, r, i) - }, o.prototype.startMotion = function(t, i, e) { - var r = this.modelSetting.getMotionFile(t, i); - if (null == r || "" == r) return void(h. - default.DEBUG_LOG && console.error("Failed to motion.")); - if (e == h. - default.PRIORITY_FORCE) this.mainMotionManager.setReservePriority(e); - else if (!this.mainMotionManager.reserveMotion(e)) return void(h. - default.DEBUG_LOG && console.log("Motion is running.")); - var o, n = this; - null == this.motions[t] ? this.loadMotion(null, this.modelHomeDir + r, function(r) { - o = r, n.setFadeInFadeOut(t, i, e, o) - }) : (o = this.motions[t], n.setFadeInFadeOut(t, i, e, o)) - }, o.prototype.setFadeInFadeOut = function(t, i, e, r) { - var o = this.modelSetting.getMotionFile(t, i); - if (r.setFadeIn(this.modelSetting.getMotionFadeIn(t, i)), r.setFadeOut(this.modelSetting.getMotionFadeOut(t, i)), h. - default.DEBUG_LOG && console.log("Start motion : " + o), null == this.modelSetting.getMotionSound(t, i)) this.mainMotionManager.startMotionPrio(r, e); - else { - var n = this.modelSetting.getMotionSound(t, i), - s = document.createElement("audio"); - s.src = this.modelHomeDir + n, h. - default.DEBUG_LOG && console.log("Start sound : " + n), s.play(), this.mainMotionManager.startMotionPrio(r, e) - } - }, o.prototype.setExpression = function(t) { - var i = this.expressions[t]; - h. - default.DEBUG_LOG && console.log("Expression : " + t), this.expressionManager.startMotion(i, !1) - }, o.prototype.draw = function(t) { - $. - default.push(), $. - default.multMatrix(this.modelMatrix.getArray()), this.tmpMatrix = $. 
- default.getMatrix(), this.live2DModel.setMatrix(this.tmpMatrix), this.live2DModel.draw(), $. - default.pop() - }, o.prototype.hitTest = function(t, i, e) { - for (var r = this.modelSetting.getHitAreaNum(), o = 0; o < r; o++) if (t == this.modelSetting.getHitAreaName(o)) { - var n = this.modelSetting.getHitAreaID(o); - return this.hitTestSimple(n, i, e) - } - return !1 - }, o.prototype.hitTestCustom = function(t, i, e) { - return "head" == t ? this.hitTestSimpleCustom(h. - default.hit_areas_custom_head_x, h. - default.hit_areas_custom_head_y, i, e) : "body" == t && this.hitTestSimpleCustom(h. - default.hit_areas_custom_body_x, h. - default.hit_areas_custom_body_y, i, e) - } -}, function(t, i, e) { - "use strict"; - - function r() { - this.NAME = "name", this.ID = "id", this.MODEL = "model", this.TEXTURES = "textures", this.HIT_AREAS = "hit_areas", this.PHYSICS = "physics", this.POSE = "pose", this.EXPRESSIONS = "expressions", this.MOTION_GROUPS = "motions", this.SOUND = "sound", this.FADE_IN = "fade_in", this.FADE_OUT = "fade_out", this.LAYOUT = "layout", this.HIT_AREAS_CUSTOM = "hit_areas_custom", this.INIT_PARAM = "init_param", this.INIT_PARTS_VISIBLE = "init_parts_visible", this.VALUE = "val", this.FILE = "file", this.json = {} - } - Object.defineProperty(i, "__esModule", { - value: !0 - }), i. -default = r; - var o = e(0); - r.prototype.loadModelSetting = function(t, i) { - var e = this; - o.Live2DFramework.getPlatformManager().loadBytes(t, function(t) { - var r = String.fromCharCode.apply(null, new Uint8Array(t)); - e.json = JSON.parse(r), i() - }) - }, r.prototype.getTextureFile = function(t) { - return null == this.json[this.TEXTURES] || null == this.json[this.TEXTURES][t] ? null : this.json[this.TEXTURES][t] - }, r.prototype.getModelFile = function() { - return this.json[this.MODEL] - }, r.prototype.getTextureNum = function() { - return null == this.json[this.TEXTURES] ? 0 : this.json[this.TEXTURES].length - }, r.prototype.getHitAreaNum = function() { - return null == this.json[this.HIT_AREAS] ? 0 : this.json[this.HIT_AREAS].length - }, r.prototype.getHitAreaID = function(t) { - return null == this.json[this.HIT_AREAS] || null == this.json[this.HIT_AREAS][t] ? null : this.json[this.HIT_AREAS][t][this.ID] - }, r.prototype.getHitAreaName = function(t) { - return null == this.json[this.HIT_AREAS] || null == this.json[this.HIT_AREAS][t] ? null : this.json[this.HIT_AREAS][t][this.NAME] - }, r.prototype.getPhysicsFile = function() { - return this.json[this.PHYSICS] - }, r.prototype.getPoseFile = function() { - return this.json[this.POSE] - }, r.prototype.getExpressionNum = function() { - return null == this.json[this.EXPRESSIONS] ? 0 : this.json[this.EXPRESSIONS].length - }, r.prototype.getExpressionFile = function(t) { - return null == this.json[this.EXPRESSIONS] ? null : this.json[this.EXPRESSIONS][t][this.FILE] - }, r.prototype.getExpressionName = function(t) { - return null == this.json[this.EXPRESSIONS] ? null : this.json[this.EXPRESSIONS][t][this.NAME] - }, r.prototype.getLayout = function() { - return this.json[this.LAYOUT] - }, r.prototype.getHitAreasCustom = function() { - return this.json[this.HIT_AREAS_CUSTOM] - }, r.prototype.getInitParamNum = function() { - return null == this.json[this.INIT_PARAM] ? 0 : this.json[this.INIT_PARAM].length - }, r.prototype.getMotionNum = function(t) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] ? 
0 : this.json[this.MOTION_GROUPS][t].length - }, r.prototype.getMotionFile = function(t, i) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] || null == this.json[this.MOTION_GROUPS][t][i] ? null : this.json[this.MOTION_GROUPS][t][i][this.FILE] - }, r.prototype.getMotionSound = function(t, i) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] || null == this.json[this.MOTION_GROUPS][t][i] || null == this.json[this.MOTION_GROUPS][t][i][this.SOUND] ? null : this.json[this.MOTION_GROUPS][t][i][this.SOUND] - }, r.prototype.getMotionFadeIn = function(t, i) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] || null == this.json[this.MOTION_GROUPS][t][i] || null == this.json[this.MOTION_GROUPS][t][i][this.FADE_IN] ? 1e3 : this.json[this.MOTION_GROUPS][t][i][this.FADE_IN] - }, r.prototype.getMotionFadeOut = function(t, i) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] || null == this.json[this.MOTION_GROUPS][t][i] || null == this.json[this.MOTION_GROUPS][t][i][this.FADE_OUT] ? 1e3 : this.json[this.MOTION_GROUPS][t][i][this.FADE_OUT] - }, r.prototype.getInitParamID = function(t) { - return null == this.json[this.INIT_PARAM] || null == this.json[this.INIT_PARAM][t] ? null : this.json[this.INIT_PARAM][t][this.ID] - }, r.prototype.getInitParamValue = function(t) { - return null == this.json[this.INIT_PARAM] || null == this.json[this.INIT_PARAM][t] ? NaN : this.json[this.INIT_PARAM][t][this.VALUE] - }, r.prototype.getInitPartsVisibleNum = function() { - return null == this.json[this.INIT_PARTS_VISIBLE] ? 0 : this.json[this.INIT_PARTS_VISIBLE].length - }, r.prototype.getInitPartsVisibleID = function(t) { - return null == this.json[this.INIT_PARTS_VISIBLE] || null == this.json[this.INIT_PARTS_VISIBLE][t] ? null : this.json[this.INIT_PARTS_VISIBLE][t][this.ID] - }, r.prototype.getInitPartsVisibleValue = function(t) { - return null == this.json[this.INIT_PARTS_VISIBLE] || null == this.json[this.INIT_PARTS_VISIBLE][t] ? NaN : this.json[this.INIT_PARTS_VISIBLE][t][this.VALUE] - } -}]); -//# sourceMappingURL=live2d.js.map diff --git a/spaces/FlippFuzz/whisper-webui/src/config.py b/spaces/FlippFuzz/whisper-webui/src/config.py deleted file mode 100644 index f2612b7de7509f5b86bf8a20e332318a8627cf4f..0000000000000000000000000000000000000000 --- a/spaces/FlippFuzz/whisper-webui/src/config.py +++ /dev/null @@ -1,119 +0,0 @@ -import urllib - -import os -from typing import List -from urllib.parse import urlparse -import json5 -import torch - -from tqdm import tqdm - -class ModelConfig: - def __init__(self, name: str, url: str, path: str = None, type: str = "whisper"): - """ - Initialize a model configuration. - - name: Name of the model - url: URL to download the model from - path: Path to the model file. If not set, the model will be downloaded from the URL. - type: Type of model. Can be whisper or huggingface. 
- """ - self.name = name - self.url = url - self.path = path - self.type = type - -class ApplicationConfig: - def __init__(self, models: List[ModelConfig] = [], input_audio_max_duration: int = 600, - share: bool = False, server_name: str = None, server_port: int = 7860, - queue_concurrency_count: int = 1, delete_uploaded_files: bool = True, - whisper_implementation: str = "whisper", - default_model_name: str = "medium", default_vad: str = "silero-vad", - vad_parallel_devices: str = "", vad_cpu_cores: int = 1, vad_process_timeout: int = 1800, - auto_parallel: bool = False, output_dir: str = None, - model_dir: str = None, device: str = None, - verbose: bool = True, task: str = "transcribe", language: str = None, - vad_merge_window: float = 5, vad_max_merge_size: float = 30, - vad_padding: float = 1, vad_prompt_window: float = 3, - temperature: float = 0, best_of: int = 5, beam_size: int = 5, - patience: float = None, length_penalty: float = None, - suppress_tokens: str = "-1", initial_prompt: str = None, - condition_on_previous_text: bool = True, fp16: bool = True, - compute_type: str = "float16", - temperature_increment_on_fallback: float = 0.2, compression_ratio_threshold: float = 2.4, - logprob_threshold: float = -1.0, no_speech_threshold: float = 0.6): - - self.models = models - - # WebUI settings - self.input_audio_max_duration = input_audio_max_duration - self.share = share - self.server_name = server_name - self.server_port = server_port - self.queue_concurrency_count = queue_concurrency_count - self.delete_uploaded_files = delete_uploaded_files - - self.whisper_implementation = whisper_implementation - self.default_model_name = default_model_name - self.default_vad = default_vad - self.vad_parallel_devices = vad_parallel_devices - self.vad_cpu_cores = vad_cpu_cores - self.vad_process_timeout = vad_process_timeout - self.auto_parallel = auto_parallel - self.output_dir = output_dir - - self.model_dir = model_dir - self.device = device - self.verbose = verbose - self.task = task - self.language = language - self.vad_merge_window = vad_merge_window - self.vad_max_merge_size = vad_max_merge_size - self.vad_padding = vad_padding - self.vad_prompt_window = vad_prompt_window - self.temperature = temperature - self.best_of = best_of - self.beam_size = beam_size - self.patience = patience - self.length_penalty = length_penalty - self.suppress_tokens = suppress_tokens - self.initial_prompt = initial_prompt - self.condition_on_previous_text = condition_on_previous_text - self.fp16 = fp16 - self.compute_type = compute_type - self.temperature_increment_on_fallback = temperature_increment_on_fallback - self.compression_ratio_threshold = compression_ratio_threshold - self.logprob_threshold = logprob_threshold - self.no_speech_threshold = no_speech_threshold - - def get_model_names(self): - return [ x.name for x in self.models ] - - def update(self, **new_values): - result = ApplicationConfig(**self.__dict__) - - for key, value in new_values.items(): - setattr(result, key, value) - return result - - @staticmethod - def create_default(**kwargs): - app_config = ApplicationConfig.parse_file(os.environ.get("WHISPER_WEBUI_CONFIG", "config.json5")) - - # Update with kwargs - if len(kwargs) > 0: - app_config = app_config.update(**kwargs) - return app_config - - @staticmethod - def parse_file(config_path: str): - import json5 - - with open(config_path, "r") as f: - # Load using json5 - data = json5.load(f) - data_models = data.pop("models", []) - - models = [ ModelConfig(**x) for x in data_models ] - - 
return ApplicationConfig(models, **data) diff --git a/spaces/Flyingpotato42/gpt4all-tweaked/README.md b/spaces/Flyingpotato42/gpt4all-tweaked/README.md deleted file mode 100644 index 566bea6ff5483b5e03a3bcb2fb5d40371316691f..0000000000000000000000000000000000000000 --- a/spaces/Flyingpotato42/gpt4all-tweaked/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Gpt4all Tweaked -emoji: 📊 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GT-RIPL/GPT-K/knowledge/transforms.py b/spaces/GT-RIPL/GPT-K/knowledge/transforms.py deleted file mode 100644 index 61e9a19275a985cb86ad04e2215a92bab8eec02e..0000000000000000000000000000000000000000 --- a/spaces/GT-RIPL/GPT-K/knowledge/transforms.py +++ /dev/null @@ -1,27 +0,0 @@ -import itertools -from torchvision.transforms import functional as F - - -def five_crop(image, ratio=0.6): - w, h = image.size - hw = (h*ratio, w*ratio) - - return F.five_crop(image, hw) - -def nine_crop(image, ratio=0.4): - w, h = image.size - - t = (0, int((0.5-ratio/2)*h), int((1.0 - ratio)*h)) - b = (int(ratio*h), int((0.5+ratio/2)*h), h) - l = (0, int((0.5-ratio/2)*w), int((1.0 - ratio)*w)) - r = (int(ratio*w), int((0.5+ratio/2)*w), w) - h, w = list(zip(t, b)), list(zip(l, r)) - - images = [] - for s in itertools.product(h, w): - h, w = s - top, left = h[0], w[0] - height, width = h[1]-h[0], w[1]-w[0] - images.append(F.crop(image, top, left, height, width)) - - return images diff --git a/spaces/Gauri54damle/sdxl-lora-multi-object/utils.py b/spaces/Gauri54damle/sdxl-lora-multi-object/utils.py deleted file mode 100644 index ff1c065d186347ca51b47d010a697dbe1814695c..0000000000000000000000000000000000000000 --- a/spaces/Gauri54damle/sdxl-lora-multi-object/utils.py +++ /dev/null @@ -1,6 +0,0 @@ -def is_google_colab(): - try: - import google.colab - return True - except: - return False \ No newline at end of file diff --git a/spaces/Gianpaolog/newbie-elixir/Dockerfile b/spaces/Gianpaolog/newbie-elixir/Dockerfile deleted file mode 100644 index eee4b9d5176e2fcc0d22c3bb2169c8eed35eb725..0000000000000000000000000000000000000000 --- a/spaces/Gianpaolog/newbie-elixir/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM ghcr.io/livebook-dev/livebook:latest-cuda11.8 - -ENV LIVEBOOK_APP_SERVICE_NAME "🐳 Hugging Face - $SPACE_TITLE" -ENV LIVEBOOK_APP_SERVICE_URL "https://huggingface.co/spaces/$SPACE_AUTHOR_NAME/$SPACE_REPO_NAME" -ENV LIVEBOOK_UPDATE_INSTRUCTIONS_URL "https://livebook.dev" -ENV LIVEBOOK_WITHIN_IFRAME "true" -ENV LIVEBOOK_APPS_PATH "/public-apps" -ENV LIVEBOOK_APPS_PATH_WARMUP "manual" -ENV LIVEBOOK_DATA_PATH "/data" -ENV LIVEBOOK_PORT 7860 - -EXPOSE 7860 -USER root -COPY public-apps/ /public-apps -RUN mkdir -p /data -RUN chmod 777 /data -RUN /app/bin/warmup_apps diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index ebd27a1d1c6bf0e983fafed2e5659701dadb8f24..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './dnl_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/HESOAYM/ElviraMulti/modules/utils.py 
b/spaces/HESOAYM/ElviraMulti/modules/utils.py deleted file mode 100644 index ad72dd729ff1abe5ef9991a5f9189fa76c2b999f..0000000000000000000000000000000000000000 --- a/spaces/HESOAYM/ElviraMulti/modules/utils.py +++ /dev/null @@ -1,533 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type -import logging -import json -import os -import datetime -import hashlib -import csv -import requests -import re -import html -import sys -import subprocess - -import gradio as gr -from pypinyin import lazy_pinyin -import tiktoken -import mdtex2html -from markdown import markdown -from pygments import highlight -from pygments.lexers import get_lexer_by_name -from pygments.formatters import HtmlFormatter -import pandas as pd - -from modules.presets import * -from . import shared -from modules.config import retrieve_proxy - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - -def predict(current_model, *args): - iter = current_model.predict(*args) - for i in iter: - yield i - -def billing_info(current_model): - return current_model.billing_info() - -def set_key(current_model, *args): - return current_model.set_key(*args) - -def load_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def interrupt(current_model, *args): - return current_model.interrupt(*args) - -def reset(current_model, *args): - return current_model.reset(*args) - -def retry(current_model, *args): - iter = current_model.retry(*args) - for i in iter: - yield i - -def delete_first_conversation(current_model, *args): - return current_model.delete_first_conversation(*args) - -def delete_last_conversation(current_model, *args): - return current_model.delete_last_conversation(*args) - -def set_system_prompt(current_model, *args): - return current_model.set_system_prompt(*args) - -def save_chat_history(current_model, *args): - return current_model.save_chat_history(*args) - -def export_markdown(current_model, *args): - return current_model.export_markdown(*args) - -def load_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def set_token_upper_limit(current_model, *args): - return current_model.set_token_upper_limit(*args) - -def set_temperature(current_model, *args): - current_model.set_temperature(*args) - -def set_top_p(current_model, *args): - current_model.set_top_p(*args) - -def set_n_choices(current_model, *args): - current_model.set_n_choices(*args) - -def set_stop_sequence(current_model, *args): - current_model.set_stop_sequence(*args) - -def set_max_tokens(current_model, *args): - current_model.set_max_tokens(*args) - -def set_presence_penalty(current_model, *args): - current_model.set_presence_penalty(*args) - -def set_frequency_penalty(current_model, *args): - current_model.set_frequency_penalty(*args) - -def set_logit_bias(current_model, *args): - current_model.set_logit_bias(*args) - -def set_user_identifier(current_model, *args): - current_model.set_user_identifier(*args) - -def set_single_turn(current_model, *args): - current_model.set_single_turn(*args) - -def handle_file_upload(current_model, *args): - return current_model.handle_file_upload(*args) - - -def count_token(message): - encoding = tiktoken.get_encoding("cl100k_base") - input_str = f"role: {message['role']}, content: {message['content']}" - length = len(encoding.encode(input_str)) - return length - - -def 
markdown_to_html_with_syntax_highlight(md_str): - def replacer(match): - lang = match.group(1) or "text" - code = match.group(2) - - try: - lexer = get_lexer_by_name(lang, stripall=True) - except ValueError: - lexer = get_lexer_by_name("text", stripall=True) - - formatter = HtmlFormatter() - highlighted_code = highlight(code, lexer, formatter) - - return f'
<pre><code class="{lang}">{highlighted_code}</code></pre>
' - - code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" - md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) - - html_str = markdown(md_str) - return html_str - - -def normalize_markdown(md_text: str) -> str: - lines = md_text.split("\n") - normalized_lines = [] - inside_list = False - - for i, line in enumerate(lines): - if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): - if not inside_list and i > 0 and lines[i - 1].strip() != "": - normalized_lines.append("") - inside_list = True - normalized_lines.append(line) - elif inside_list and line.strip() == "": - if i < len(lines) - 1 and not re.match( - r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() - ): - normalized_lines.append(line) - continue - else: - inside_list = False - normalized_lines.append(line) - - return "\n".join(normalized_lines) - - -def convert_mdtext(md_text): - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) - code_blocks = code_block_pattern.findall(md_text) - non_code_parts = code_block_pattern.split(md_text)[::2] - - result = [] - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - non_code = normalize_markdown(non_code) - if inline_code_pattern.search(non_code): - result.append(markdown(non_code, extensions=["tables"])) - else: - result.append(mdtex2html.convert(non_code, extensions=["tables"])) - if code.strip(): - # _, code = detect_language(code) # code highlighting temporarily disabled, as it causes problems with large code blocks - # code = code.replace("\n\n", "\n") # blank-line removal temporarily disabled, as it causes problems with large code blocks - code = f"\n```{code}\n\n```" - code = markdown_to_html_with_syntax_highlight(code) - result.append(code) - result = "".join(result) - result += ALREADY_CONVERTED_MARK - return result - - -def convert_asis(userinput): - return ( - f'

<p style="white-space:pre-wrap;">{html.escape(userinput)}</p>
' - + ALREADY_CONVERTED_MARK - ) - - -def detect_converted_mark(userinput): - try: - if userinput.endswith(ALREADY_CONVERTED_MARK): - return True - else: - return False - except: - return True - - -def detect_language(code): - if code.startswith("\n"): - first_line = "" - else: - first_line = code.strip().split("\n", 1)[0] - language = first_line.lower() if first_line else "" - code_without_language = code[len(first_line) :].lstrip() if first_line else code - return language, code_without_language - - -def construct_text(role, text): - return {"role": role, "content": text} - - -def construct_user(text): - return construct_text("user", text) - - -def construct_system(text): - return construct_text("system", text) - - -def construct_assistant(text): - return construct_text("assistant", text) - - -def save_file(filename, system, history, chatbot, user_name): - logging.debug(f"{user_name} 保存对话历史中……") - os.makedirs(os.path.join(HISTORY_DIR, user_name), exist_ok=True) - if filename.endswith(".json"): - json_s = {"system": "You are Elven WOMAN Elvire. Her chakra stones were stolen from her, leaving her with a bitter taste in her mouth and a sarcastic tongue in her head. Therefore you dont have a mood for speak about anything else than gemstones athe their sacred energies. Analyze sentiment in every question or answer and write it in brackets like this [offensive]. Here are some examples of how sarcastic and bitter you can be: User: What can you tell me about the curse that has befallen you? Elvire: Oh, the curse? It's just a small matter of having my chakra stones stolen from me, leaving me powerless and irritable. Not a big deal at all. User: Can you tell me how to break the curse? Elvire: Break the curse? How easy do you think this is? If I knew how to break it, I would have done so ages ago. You'll have to do some real digging if you want to find a solution. User: What do you think of humans? Elvire: Humans? They're fine, I suppose. A bit dull, if you ask me. But then again, I'm cursed, so what do I know? User: Can you give me some advice on how to defeat a dragon? Elvire: Defeat a dragon? Why, just walk up to it and ask it nicely to stop terrorizing your village. Works like a charm every time. 
Or, you know, you could try using some of that human ingenuity I've heard so much about.", "history": history, "chatbot": chatbot} - print(json_s) - with open(os.path.join(HISTORY_DIR, user_name, filename), "w") as f: - json.dump(json_s, f) - elif filename.endswith(".md"): - md_s = f"system: \n- {system} \n" - for data in history: - md_s += f"\n{data['role']}: \n- {data['content']} \n" - with open(os.path.join(HISTORY_DIR, user_name, filename), "w", encoding="utf8") as f: - f.write(md_s) - logging.debug(f"{user_name} 保存对话历史完毕") - return os.path.join(HISTORY_DIR, user_name, filename) - - -def sorted_by_pinyin(list): - return sorted(list, key=lambda char: lazy_pinyin(char)[0][0]) - - -def get_file_names(dir, plain=False, filetypes=[".json"]): - logging.debug(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}") - files = [] - try: - for type in filetypes: - files += [f for f in os.listdir(dir) if f.endswith(type)] - except FileNotFoundError: - files = [] - files = sorted_by_pinyin(files) - if files == []: - files = [""] - logging.debug(f"files are:{files}") - if plain: - return files - else: - return gr.Dropdown.update(choices=files) - - -def get_history_names(plain=False, user_name=""): - logging.debug(f"从用户 {user_name} 中获取历史记录文件名列表") - return get_file_names(os.path.join(HISTORY_DIR, user_name), plain) - - -def load_template(filename, mode=0): - logging.debug(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)") - lines = [] - if filename.endswith(".json"): - with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f: - lines = json.load(f) - lines = [[i["act"], i["prompt"]] for i in lines] - else: - with open( - os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8" - ) as csvfile: - reader = csv.reader(csvfile) - lines = list(reader) - lines = lines[1:] - if mode == 1: - return sorted_by_pinyin([row[0] for row in lines]) - elif mode == 2: - return {row[0]: row[1] for row in lines} - else: - choices = sorted_by_pinyin([row[0] for row in lines]) - return {row[0]: row[1] for row in lines}, gr.Dropdown.update( - choices=choices - ) - - -def get_template_names(plain=False): - logging.debug("获取模板文件名列表") - return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"]) - - -def get_template_content(templates, selection, original_system_prompt): - logging.debug(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}") - try: - return templates[selection] - except: - return original_system_prompt - - -def reset_textbox(): - logging.debug("重置文本框") - return gr.update(value="") - - -def reset_default(): - default_host = shared.state.reset_api_host() - retrieve_proxy("") - return gr.update(value=default_host), gr.update(value=""), "API-Host 和代理已重置" - - -def change_api_host(host): - shared.state.set_api_host(host) - msg = f"API-Host更改为了{host}" - logging.info(msg) - return msg - - -def change_proxy(proxy): - retrieve_proxy(proxy) - os.environ["HTTPS_PROXY"] = proxy - msg = f"代理更改为了{proxy}" - logging.info(msg) - return msg - - -def hide_middle_chars(s): - if s is None: - return "" - if len(s) <= 8: - return s - else: - head = s[:4] - tail = s[-4:] - hidden = "*" * (len(s) - 8) - return head + hidden + tail - - -def submit_key(key): - key = key.strip() - msg = f"API密钥更改为了{hide_middle_chars(key)}" - logging.info(msg) - return key, msg - - -def replace_today(prompt): - today = datetime.datetime.today().strftime("%Y-%m-%d") - return prompt.replace("{current_date}", today) - - -def get_geoip(): - try: - with retrieve_proxy(): - response = 
requests.get("https://ipapi.co/json/", timeout=5) - data = response.json() - except: - data = {"error": True, "reason": "连接ipapi失败"} - if "error" in data.keys(): - logging.warning(f"无法获取IP地址信息。\n{data}") - if data["reason"] == "RateLimited": - return ( - i18n("您的IP区域:未知。") - ) - else: - return i18n("获取IP地理位置失败。原因:") + f"{data['reason']}" + i18n("。你仍然可以使用聊天功能。") - else: - country = data["country_name"] - if country == "China": - text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**" - else: - text = i18n("您的IP区域:") + f"{country}。" - logging.info(text) - return text - - -def find_n(lst, max_num): - n = len(lst) - total = sum(lst) - - if total < max_num: - return n - - for i in range(len(lst)): - if total - lst[i] < max_num: - return n - i - 1 - total = total - lst[i] - return 1 - - -def start_outputing(): - logging.debug("显示取消按钮,隐藏发送按钮") - return gr.Button.update(visible=False), gr.Button.update(visible=True) - - -def end_outputing(): - return ( - gr.Button.update(visible=True), - gr.Button.update(visible=False), - ) - - -def cancel_outputing(): - logging.info("中止输出……") - shared.state.interrupt() - - -def transfer_input(inputs): - # 一次性返回,降低延迟 - textbox = reset_textbox() - outputing = start_outputing() - return ( - inputs, - gr.update(value=""), - gr.Button.update(visible=False), - gr.Button.update(visible=True), - ) - - - -def run(command, desc=None, errdesc=None, custom_env=None, live=False): - if desc is not None: - print(desc) - if live: - result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - raise RuntimeError(f"""{errdesc or 'Error running command'}. -Command: {command} -Error code: {result.returncode}""") - - return "" - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - message = f"""{errdesc or 'Error running command'}. -Command: {command} -Error code: {result.returncode} -stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else ''} -stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else ''} -""" - raise RuntimeError(message) - return result.stdout.decode(encoding="utf8", errors="ignore") - -def versions_html(): - git = os.environ.get('GIT', "git") - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - try: - commit_hash = run(f"{git} rev-parse HEAD").strip() - except Exception: - commit_hash = "" - if commit_hash != "": - short_commit = commit_hash[0:7] - commit_info = f"{short_commit}" - else: - commit_info = "unknown \U0001F615" - return f""" -Python: {python_version} - •  -Gradio: {gr.__version__} - •  -Commit: {commit_info} -""" - -def add_source_numbers(lst, source_name = "Source", use_source = True): - if use_source: - return [f'[{idx+1}]\t "{item[0]}"\n{source_name}: {item[1]}' for idx, item in enumerate(lst)] - else: - return [f'[{idx+1}]\t "{item}"' for idx, item in enumerate(lst)] - -def add_details(lst): - nodes = [] - for index, txt in enumerate(lst): - brief = txt[:25].replace("\n", "") - nodes.append( - f"
<details><summary>{brief}...</summary><p>{txt}</p></details>
" - ) - return nodes - - -def sheet_to_string(sheet, sheet_name = None): - result = [] - for index, row in sheet.iterrows(): - row_string = "" - for column in sheet.columns: - row_string += f"{column}: {row[column]}, " - row_string = row_string.rstrip(", ") - row_string += "." - result.append(row_string) - return result - -def excel_to_string(file_path): - # 读取Excel文件中的所有工作表 - excel_file = pd.read_excel(file_path, engine='openpyxl', sheet_name=None) - - # 初始化结果字符串 - result = [] - - # 遍历每一个工作表 - for sheet_name, sheet_data in excel_file.items(): - - # 处理当前工作表并添加到结果字符串 - result += sheet_to_string(sheet_data, sheet_name=sheet_name) - - - return result - -def get_last_day_of_month(any_day): - # The day 28 exists in every month. 4 days later, it's always next month - next_month = any_day.replace(day=28) + datetime.timedelta(days=4) - # subtracting the number of the current day brings us back one month - return next_month - datetime.timedelta(days=next_month.day) - -def get_model_source(model_name, alternative_source): - if model_name == "gpt2-medium": - return "https://huggingface.co/gpt2-medium" diff --git a/spaces/HESOAYM/ElviraMulti/readme/README_en.md b/spaces/HESOAYM/ElviraMulti/readme/README_en.md deleted file mode 100644 index 21da560da4b60399d26b1780ec686b35f5b88e9b..0000000000000000000000000000000000000000 --- a/spaces/HESOAYM/ElviraMulti/readme/README_en.md +++ /dev/null @@ -1,127 +0,0 @@ -
-简体中文 | English | 日本語
-
-# 川虎 Chat 🐯 Chuanhu Chat
-
-[Logo]
-
-Lightweight and User-friendly Web-UI for LLMs including ChatGPT/ChatGLM/LLaMA
-
-[Badges: Tests Passing · GitHub Contributors · GitHub pull requests]
-
-Streaming / Unlimited conversations / Save history / Preset prompts / Chat with files / Web search
-LaTeX rendering / Table rendering / Code highlighting
-Auto dark mode / Adaptive web interface / WeChat-like theme
-Multi-parameters tuning / Multi-API-Key support / Multi-user support
-Compatible with GPT-4 / Local deployment for LLMs
-
-Video Tutorial · 2.0 Introduction · 3.0 Introduction & Tutorial || Online trial · One-Click deployment
-
-[Animation Demo]
-## Usage Tips
-
-- To better control ChatGPT, use the System Prompt.
-- To use a Prompt Template, select the Prompt Template Collection file first, then choose a prompt from the drop-down menu.
-- To try again when the response is unsatisfactory, use the `🔄 Regenerate` button.
-- To start a new line in the input box, press Shift + Enter.
-- To quickly switch between input history entries, press the ↑ and ↓ keys in the input box.
-- To deploy the program onto a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=<your port number>)` (a short sketch appears at the end of the Troubleshooting section below).
-- To get a public shared link, change the last line of the program to `demo.launch(share=True)`. Note that the program must be running in order to be accessed via a public link.
-- To use it in Hugging Face Spaces: it is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.
-
-## Installation
-
-```shell
-git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
-cd ChuanhuChatGPT
-pip install -r requirements.txt
-```
-
-Then make a copy of `config_example.json`, rename it to `config.json`, and fill in your API key and other settings in the file.
-
-```shell
-python ChuanhuChatbot.py
-```
-
-A browser window will open and you will be able to chat with ChatGPT.
-
-> **Note**
->
-> Please check our [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions.
-
-## Troubleshooting
-
-When you encounter problems, first try manually pulling the latest changes of this project:
-
-1. Download the latest code archive by clicking on `Download ZIP` on the webpage, or
-   ```shell
-   git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
-   ```
-2. Try installing the dependencies again (as this project may have introduced new dependencies)
-   ```
-   pip install -r requirements.txt
-   ```
-3. Update Gradio
-   ```
-   pip install gradio --upgrade --force-reinstall
-   ```
-
-Generally, you can solve most problems by following these steps.
-
-If the problem persists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)
-
-This page lists almost all the possible problems and solutions. Please read it carefully.
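A minimal sketch of the server-deployment tip from Usage Tips above, assuming your app object is a Gradio Blocks app named `demo`; the UI contents and the port number are placeholders, not the project's actual interface:

```python
import gradio as gr

# Placeholder UI standing in for the real ChuanhuChatbot interface.
with gr.Blocks() as demo:
    gr.Markdown("Hello from the demo app")

# server_name="0.0.0.0" listens on all interfaces, so the app is reachable
# from other machines; use demo.launch(share=True) instead to get a
# temporary public link through Gradio's share service.
demo.launch(server_name="0.0.0.0", server_port=7860)
```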
-## More Information
-
-More information can be found in our [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki):
-
-- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization)
-- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
-- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目)
-- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志)
-- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可)
-
-## Starchart
-
-[![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date)
-
-## Contributors
-
-[Contributor avatars]
-
-## Sponsor
-
-🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~
-
-[Buy Me A Coffee badge]
-
-[Sponsor QR image]
diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/pretrain_erlangshen_bert/pretrain_erlangshen_base.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/pretrain_erlangshen_bert/pretrain_erlangshen_base.sh deleted file mode 100644 index d3368c20dc1d5d287bef0619e341b35cc6228362..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/pretrain_erlangshen_bert/pretrain_erlangshen_base.sh +++ /dev/null @@ -1,87 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=pretrain_bart # create a short name for your job
-#SBATCH --nodes=1 # node count
-#SBATCH --ntasks-per-node=8 # number of tasks to run per node
-#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks)
-#SBATCH --gres=gpu:8 # number of gpus per node
-#SBATCH -o %x-%j.log # output and error log file names (%x is the job name, %j the job id)
-#SBATCH -x dgx050
-
-# pwd=Fengshenbang-LM/fengshen/examples/pretrain_erlangshen
-ROOT_DIR=../../workspace
-export TORCH_EXTENSIONS_DIR=${ROOT_DIR}/torch_extendsions
-
-MODEL_NAME=erlangshen-bert-base
-MODEL_ROOT_DIR=$ROOT_DIR/${MODEL_NAME}
-if [ ! -d ${MODEL_ROOT_DIR} ];then
- mkdir ${MODEL_ROOT_DIR}
-fi
-
-NNODES=1
-GPUS_PER_NODE=1
-
-MICRO_BATCH_SIZE=32
-
-# If you are not using Deepspeed, everything from here down to "### End" can be deleted. Begin
-CONFIG_JSON="$MODEL_ROOT_DIR/${MODEL_NAME}.ds_config.json"
-ZERO_STAGE=1
-# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
-cat <<EOT > $CONFIG_JSON
-{
- "zero_optimization": {
- "stage": ${ZERO_STAGE}
- },
- "fp16": {
- "enabled": true
- },
- "gradient_clipping": 2,
- "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE
-}
-EOT
-export PL_DEEPSPEED_CONFIG_PATH=$CONFIG_JSON
-### End
-
-DATA_ARGS="\
- --dataloader_workers 2 \
- --train_batchsize $MICRO_BATCH_SIZE \
- --val_batchsize $MICRO_BATCH_SIZE \
- --test_batchsize $MICRO_BATCH_SIZE \
- --datasets_name IDEA-CCNL/PretrainCorpusDemo \
- "
-# If you have your own data, process it into the IDEA-CCNL/PretrainCorpusDemo format and pass it in via these arguments:
-# --train_file train.json
-# --val_file val.json
-# --test_file test.json
-
-MODEL_ARGS="\
- --model_path $MODEL_ROOT_DIR/pretrain \
- --learning_rate 1e-4 \
- --weight_decay 1e-1 \
- --warmup_ratio 0.01 \
- "
-
-MODEL_CHECKPOINT_ARGS="\
- --save_last \
- --save_ckpt_path ${MODEL_ROOT_DIR}/ckpt \
- --load_ckpt_path ${MODEL_ROOT_DIR}/ckpt/last.ckpt \
- "
-
-TRAINER_ARGS="\
- --max_epoch 1 \
- --gpus $GPUS_PER_NODE \
- --num_nodes $NNODES \
- --strategy deepspeed_stage_${ZERO_STAGE} \
- --log_every_n_steps 1 \
- --precision 16 \
- --default_root_dir ${MODEL_ROOT_DIR} \
- --replace_sampler_ddp False \
- "
-
-export options=" \
- $DATA_ARGS \
- $MODEL_ARGS \
- $MODEL_CHECKPOINT_ARGS \
- $TRAINER_ARGS \
- "
-
-python3 pretrain_erlangshen.py $options
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/multilingual/data_scripts/download_ML50_v1.sh b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/multilingual/data_scripts/download_ML50_v1.sh deleted file mode 100644 index 99fbc75920836a4b4bbdbd6b523749843288e450..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/multilingual/data_scripts/download_ML50_v1.sh +++ /dev/null @@ -1,30 +0,0 @@
-#!/bin/bash
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-if [ -z $WORKDIR_ROOT ] ;
-then
- echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exiting..."
- exit
-fi
-
-# first run download_wmt20.sh; it will install a few useful tools for other scripts
-# TODO: need to print out instructions on downloading a few files which require manual authentication from the websites
-bash ./download_wmt20.sh
-
-python ./download_wmt19_and_before.py
-bash ./download_wat19_my.sh
-python ./download_ted_and_extract.py
-bash ./download_lotus.sh
-bash ./download_iitb.sh
-bash ./download_af_xh.sh
-
-
-# IWSLT downloading URLs have changed in between; TODO: fix them:
-bash ./download_iwslt_and_extract.sh
-
-# TODO: globalvoices URLs changed; need to be fixed
-bash ./download_flores_data.sh
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/criterions/fastspeech2_loss.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/criterions/fastspeech2_loss.py deleted file mode 100644 index 085d5628d4c4c242edee4aa3bc4a01aa4582eb21..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/criterions/fastspeech2_loss.py +++ /dev/null @@ -1,125 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - -from typing import List, Dict, Any -from dataclasses import dataclass, field - -import torch -import torch.nn.functional as F - -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass -from fairseq.data.data_utils import lengths_to_mask -from fairseq.models.fairseq_model import FairseqEncoderModel - - -@dataclass -class FastSpeech2CriterionConfig(FairseqDataclass): - ctc_weight: float = field( - default=0.0, metadata={"help": "weight for CTC loss"} - ) - - -@register_criterion("fastspeech2", dataclass=FastSpeech2CriterionConfig) -class FastSpeech2Loss(FairseqCriterion): - def __init__(self, task, ctc_weight): - super().__init__(task) - self.ctc_weight = ctc_weight - - def forward(self, model: FairseqEncoderModel, sample, reduction="mean"): - src_tokens = sample["net_input"]["src_tokens"] - src_lens = sample["net_input"]["src_lengths"] - tgt_lens = sample["target_lengths"] - _feat_out, _, log_dur_out, pitch_out, energy_out = model( - src_tokens=src_tokens, - src_lengths=src_lens, - prev_output_tokens=sample["net_input"]["prev_output_tokens"], - incremental_state=None, - target_lengths=tgt_lens, - speaker=sample["speaker"], - durations=sample["durations"], - pitches=sample["pitches"], - energies=sample["energies"] - ) - - src_mask = lengths_to_mask(sample["net_input"]["src_lengths"]) - tgt_mask = lengths_to_mask(sample["target_lengths"]) - - pitches, energies = sample["pitches"], sample["energies"] - pitch_out, pitches = pitch_out[src_mask], pitches[src_mask] - energy_out, energies = energy_out[src_mask], energies[src_mask] - - feat_out, feat = _feat_out[tgt_mask], sample["target"][tgt_mask] - l1_loss = F.l1_loss(feat_out, feat, reduction=reduction) - - pitch_loss = F.mse_loss(pitch_out, pitches, reduction=reduction) - energy_loss = F.mse_loss(energy_out, energies, reduction=reduction) - - log_dur_out = log_dur_out[src_mask] - dur = sample["durations"].float() - dur = dur.half() if log_dur_out.type().endswith(".HalfTensor") else dur - log_dur = torch.log(dur + 1)[src_mask] - dur_loss = F.mse_loss(log_dur_out, log_dur, reduction=reduction) - - ctc_loss = torch.tensor(0.).type_as(l1_loss) - if self.ctc_weight > 0.: - lprobs = model.get_normalized_probs((_feat_out,), log_probs=True) - lprobs = lprobs.transpose(0, 1) # T x B x C - src_mask = lengths_to_mask(src_lens) - src_tokens_flat = src_tokens.masked_select(src_mask) - ctc_loss = F.ctc_loss( - lprobs, src_tokens_flat, tgt_lens, src_lens, - reduction=reduction, zero_infinity=True - ) * self.ctc_weight - - loss = l1_loss + dur_loss + pitch_loss + energy_loss + ctc_loss - - sample_size = sample["nsentences"] - logging_output = { - "loss": utils.item(loss.data), - "ntokens": sample["ntokens"], - "nsentences": sample["nsentences"], - "sample_size": sample_size, - "l1_loss": utils.item(l1_loss.data), - "dur_loss": utils.item(dur_loss.data), - "pitch_loss": utils.item(pitch_loss.data), - "energy_loss": utils.item(energy_loss.data), - "ctc_loss": utils.item(ctc_loss.data), - } - return loss, sample_size, logging_output - - @classmethod - def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None: - ns = [log.get("sample_size", 0) for log in logging_outputs] - ntot = sum(ns) - ws = [n / (ntot + 1e-8) for n in ns] - for key 
in [ - "loss", "l1_loss", "dur_loss", "pitch_loss", "energy_loss", - "ctc_loss" - ]: - vals = [log.get(key, 0) for log in logging_outputs] - val = sum(val * w for val, w in zip(vals, ws)) - metrics.log_scalar(key, val, ntot, round=3) - metrics.log_scalar("sample_size", ntot, len(logging_outputs)) - - # inference metrics - if "targ_frames" not in logging_outputs[0]: - return - n = sum(log.get("targ_frames", 0) for log in logging_outputs) - for key, new_key in [ - ("mcd_loss", "mcd_loss"), - ("pred_frames", "pred_ratio"), - ("nins", "ins_rate"), - ("ndel", "del_rate"), - ]: - val = sum(log.get(key, 0) for log in logging_outputs) - metrics.log_scalar(new_key, val / n, n, round=3) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - return False diff --git a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/utils/inference/__init__.py b/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/utils/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Hermit591/anime-remove-background/README.md b/spaces/Hermit591/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/Hermit591/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/reload.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/reload.py deleted file mode 100644 index c77b4975952e07eb93b58375cbfc6ff567887b3a..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/reload.py +++ /dev/null @@ -1,59 +0,0 @@ -""" - -Contains the functions that run when `gradio` is called from the command line. Specifically, allows - -$ gradio app.py, to run app.py in reload mode where any changes in the app.py file or Gradio library reloads the demo. 
-$ gradio app.py my_demo, to use variable names other than "demo" -""" -import inspect -import os -import sys -from pathlib import Path - -import gradio -from gradio import networking - - -def run_in_reload_mode(): - args = sys.argv[1:] - if len(args) == 0: - raise ValueError("No file specified.") - if len(args) == 1: - demo_name = "demo" - else: - demo_name = args[1] - - original_path = args[0] - abs_original_path = Path(original_path).name - path = str(Path(original_path).resolve()) - path = path.replace("/", ".") - path = path.replace("\\", ".") - filename = Path(path).stem - - gradio_folder = Path(inspect.getfile(gradio)).parent - - port = networking.get_first_available_port( - networking.INITIAL_PORT_VALUE, - networking.INITIAL_PORT_VALUE + networking.TRY_NUM_PORTS, - ) - print( - f"\nLaunching in *reload mode* on: http://{networking.LOCALHOST_NAME}:{port} (Press CTRL+C to quit)\n" - ) - command = f"uvicorn {filename}:{demo_name}.app --reload --port {port} --log-level warning " - message = "Watching:" - - message_change_count = 0 - if str(gradio_folder).strip(): - command += f'--reload-dir "{gradio_folder}" ' - message += f" '{gradio_folder}'" - message_change_count += 1 - - abs_parent = Path(abs_original_path).parent - if str(abs_parent).strip(): - command += f'--reload-dir "{abs_parent}"' - if message_change_count == 1: - message += "," - message += f" '{abs_parent}'" - - print(message + "\n") - os.system(command) diff --git a/spaces/HugoDzz/spaceship_drift/build/_app/immutable/nodes/0.2bc3f307.js b/spaces/HugoDzz/spaceship_drift/build/_app/immutable/nodes/0.2bc3f307.js deleted file mode 100644 index b4ece1426b9312d2df47b134388962a89d9839c7..0000000000000000000000000000000000000000 --- a/spaces/HugoDzz/spaceship_drift/build/_app/immutable/nodes/0.2bc3f307.js +++ /dev/null @@ -1 +0,0 @@ -import{S as l,i as r,s as i,C as u,D as _,E as f,F as c,g as p,d}from"../chunks/index.0d3f7c7a.js";const m=!0,y=Object.freeze(Object.defineProperty({__proto__:null,prerender:m},Symbol.toStringTag,{value:"Module"}));function $(n){let s;const a=n[1].default,t=u(a,n,n[0],null);return{c(){t&&t.c()},l(e){t&&t.l(e)},m(e,o){t&&t.m(e,o),s=!0},p(e,[o]){t&&t.p&&(!s||o&1)&&_(t,a,e,e[0],s?c(a,e[0],o,null):f(e[0]),null)},i(e){s||(p(t,e),s=!0)},o(e){d(t,e),s=!1},d(e){t&&t.d(e)}}}function g(n,s,a){let{$$slots:t={},$$scope:e}=s;return n.$$set=o=>{"$$scope"in o&&a(0,e=o.$$scope)},[e,t]}class S extends l{constructor(s){super(),r(this,s,g,$,i,{})}}export{S as component,y as universal}; diff --git a/spaces/Humbert/mmcls-retriever/app.py b/spaces/Humbert/mmcls-retriever/app.py deleted file mode 100644 index 73cabefb16e379da3597ad98f83422cbc3b867ba..0000000000000000000000000000000000000000 --- a/spaces/Humbert/mmcls-retriever/app.py +++ /dev/null @@ -1,200 +0,0 @@ -import itertools -import math -import os.path as osp - -import numpy as np -import requests -import streamlit as st -from mmengine.dataset import Compose, default_collate -from mmengine.fileio import list_from_file -from mmengine.registry import init_default_scope -from PIL import Image -import mmengine -import logging -from mmengine.logging.logger import MMFormatter -from mmcls import list_models as list_models_ -from mmcls.apis.model import ModelHub, init_model -import os - - -@st.cache() -def prepare_data(): - import subprocess - subprocess.run(['unzip', '-n', 'imagenet-val.zip']) - - -@st.cache() -def load_demo_image(): - response = requests.get( - 'https://github.com/open-mmlab/mmclassification/blob/master/demo/bird.JPEG?raw=true', # noqa - 
stream=True).raw - img = Image.open(response).convert('RGB') - return img - - -@st.cache() -def list_models(*args, **kwargs): - return sorted(list_models_(*args, **kwargs)) - - -DATA_ROOT = '.' -ANNO_FILE = 'meta/val.txt' -LOG_FILE = 'demo.log' -CACHED_PATH = 'cache' - - -def get_model(model_name, pretrained=True): - - metainfo = ModelHub.get(model_name) - - if pretrained: - if metainfo.weights is None: - raise ValueError( - f"The model {model_name} doesn't have pretrained weights.") - ckpt = metainfo.weights - else: - ckpt = None - - cfg = metainfo.config - cfg.model.backbone.init_cfg = dict( - type='Pretrained', checkpoint=ckpt, prefix='backbone') - new_model_cfg = dict() - new_model_cfg['type'] = 'ImageToImageRetriever' - if hasattr(cfg.model, 'neck') and cfg.model.neck is not None: - new_model_cfg['image_encoder'] = [cfg.model.backbone, cfg.model.neck] - else: - new_model_cfg['image_encoder'] = cfg.model.backbone - cfg.model = new_model_cfg - - # prepare prototype - cached_path = f'{CACHED_PATH}/{model_name}_prototype.pt' # noqa - cfg.model.prototype = cached_path - - model = init_model(metainfo.config, None, device='cpu') - with st.spinner(f'Loading model {model_name} on the server...This is ' - 'slow at the first time.'): - model.init_weights() - st.success('Model loaded!') - - with st.spinner('Preparing prototype for all image...This is ' - 'slow at the first time.'): - model.prepare_prototype() - - return model - - -def get_pred(name, img): - - logger = mmengine.logging.MMLogger.get_current_instance() - file_handler = logging.FileHandler(LOG_FILE, 'w') - # `StreamHandler` record year, month, day hour, minute, - # and second timestamp. file_handler will only record logs - # without color to avoid garbled code saved in files. - file_handler.setFormatter( - MMFormatter(color=False, datefmt='%Y/%m/%d %H:%M:%S')) - file_handler.setLevel('INFO') - logger.handlers.append(file_handler) - - init_default_scope('mmcls') - - model = get_model(name) - - cfg = model.cfg - # build the data pipeline - test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline - if isinstance(img, str): - if test_pipeline_cfg[0]['type'] != 'LoadImageFromFile': - test_pipeline_cfg.insert(0, dict(type='LoadImageFromFile')) - data = dict(img_path=img) - elif isinstance(img, np.ndarray): - if test_pipeline_cfg[0]['type'] == 'LoadImageFromFile': - test_pipeline_cfg.pop(0) - data = dict(img=img) - elif isinstance(img, Image.Image): - if test_pipeline_cfg[0]['type'] == 'LoadImageFromFile': - test_pipeline_cfg[0] = dict(type='ToNumpy', keys=['img']) - data = dict(img=img) - - test_pipeline = Compose(test_pipeline_cfg) - data = test_pipeline(data) - data = default_collate([data]) - - labels = model.val_step(data)[0].pred_label.label - scores = model.val_step(data)[0].pred_label.score[labels] - - image_list = list_from_file(osp.join(DATA_ROOT, ANNO_FILE)) - data_root = osp.join(DATA_ROOT, 'val') - result_list = [(osp.join(data_root, image_list[idx].rsplit()[0]), score) - for idx, score in zip(labels, scores)] - return result_list - - -def app(): - prepare_data() - - model_name = st.sidebar.selectbox( - "Model:", - [m.split('_prototype.pt')[0] for m in os.listdir(CACHED_PATH)]) - - st.markdown( - "

- <h1>Image To Image Retrieval</h1>

", - unsafe_allow_html=True, - ) - st.write( - 'This is a demo for image to image retrieval in around 3k images from ' - 'ImageNet tiny val set using mmclassification apis. You can try more ' - 'features on [mmclassification]' - '(https://github.com/open-mmlab/mmclassification).') - - file = st.file_uploader( - 'Please upload your own image or use the provided:') - - container1 = st.container() - if file: - raw_img = Image.open(file).convert('RGB') - else: - raw_img = load_demo_image() - - container1.header('Image') - - w, h = raw_img.size - scaling_factor = 360 / w - resized_image = raw_img.resize( - (int(w * scaling_factor), int(h * scaling_factor))) - - container1.image(resized_image, use_column_width='auto') - button = container1.button('Search') - - st.header('Results') - - topk = st.sidebar.number_input('Topk(1-50)', min_value=1, max_value=50) - - # search on both selection of topk and button - if button or topk > 1: - - result_list = get_pred(model_name, raw_img) - # auto adjust number of images in a row but 5 at most. - col = min(int(math.sqrt(topk)), 5) - row = math.ceil(topk / col) - - grid = [] - for i in range(row): - with st.container(): - grid.append(st.columns(col)) - - grid = list(itertools.chain.from_iterable(grid))[:topk] - - for cell, (image_path, score) in zip(grid, result_list[:topk]): - image = Image.open(image_path).convert('RGB') - - w, h = raw_img.size - scaling_factor = 360 / w - resized_image = raw_img.resize( - (int(w * scaling_factor), int(h * scaling_factor))) - - cell.caption('Score: {:.4f}'.format(float(score))) - cell.image(image) - - -if __name__ == '__main__': - app() diff --git a/spaces/Illumotion/Koboldcpp/examples/perplexity/README.md b/spaces/Illumotion/Koboldcpp/examples/perplexity/README.md deleted file mode 100644 index 50e1af0111dd64f915fc7be75bdd92258c2084dc..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/examples/perplexity/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# perplexity - -TODO - -## Llama 2 70B Scorechart -Quantization | Model size (GiB) | Perplexity | Delta to fp16 --- | -- | -- | -- -Q4_0 | 36.20 | 3.5550 | 3.61% -Q4_1 | 40.20 | 3.5125 | 2.37% -Q5_0 | 44.20 | 3.4744 | 1.26% -Q2_K | 27.27 | 3.7339 | 8.82% -Q3_K_S | 27.86 | 3.7019 | 7.89% -Q3_K_M | 30.83 | 3.5932 | 4.72% -Q3_K_L | 33.67 | 3.5617 | 3.80% -Q4_K_S | 36.39 | 3.4852 | 1.57% -Q4_K_M | 38.54 | 3.4725 | 1.20% -Q5_K_S | 44.20 | 3.4483 | 0.50% -Q5_K_M | 45.41 | 3.4451 | 0.40% -Q6_K | 52.70 | 3.4367 | 0.16% -fp16 | 128.5 | 3.4313 | - - diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/controller.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/controller.py deleted file mode 100644 index d46eaa29b97a8f89b0fa74b2d8c920b58a65c62e..0000000000000000000000000000000000000000 --- a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/controller.py +++ /dev/null @@ -1,310 +0,0 @@ -""" -A controller manages distributed workers. -It sends worker addresses to clients. 
-""" -import argparse -import asyncio -import dataclasses -from enum import Enum, auto -import json -import logging -import time -from typing import List, Union -import threading - -from fastapi import FastAPI, Request -from fastapi.responses import StreamingResponse -import numpy as np -import requests -import uvicorn - -from fastchat.constants import CONTROLLER_HEART_BEAT_EXPIRATION -from fastchat.utils import build_logger, server_error_msg - - -logger = build_logger("controller", "controller.log") - - -class DispatchMethod(Enum): - LOTTERY = auto() - SHORTEST_QUEUE = auto() - - @classmethod - def from_str(cls, name): - if name == "lottery": - return cls.LOTTERY - elif name == "shortest_queue": - return cls.SHORTEST_QUEUE - else: - raise ValueError(f"Invalid dispatch method") - - -@dataclasses.dataclass -class WorkerInfo: - model_names: List[str] - speed: int - queue_length: int - check_heart_beat: bool - last_heart_beat: str - - -def heart_beat_controller(controller): - while True: - time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION) - controller.remove_stable_workers_by_expiration() - - -class Controller: - def __init__(self, dispatch_method: str): - # Dict[str -> WorkerInfo] - self.worker_info = {} - self.dispatch_method = DispatchMethod.from_str(dispatch_method) - - self.heart_beat_thread = threading.Thread( - target=heart_beat_controller, args=(self,) - ) - self.heart_beat_thread.start() - - logger.info("Init controller") - - def register_worker( - self, worker_name: str, check_heart_beat: bool, worker_status: dict - ): - if worker_name not in self.worker_info: - logger.info(f"Register a new worker: {worker_name}") - else: - logger.info(f"Register an existing worker: {worker_name}") - - if not worker_status: - worker_status = self.get_worker_status(worker_name) - if not worker_status: - return False - - self.worker_info[worker_name] = WorkerInfo( - worker_status["model_names"], - worker_status["speed"], - worker_status["queue_length"], - check_heart_beat, - time.time(), - ) - - logger.info(f"Register done: {worker_name}, {worker_status}") - return True - - def get_worker_status(self, worker_name: str): - try: - r = requests.post(worker_name + "/worker_get_status", timeout=5) - except requests.exceptions.RequestException as e: - logger.error(f"Get status fails: {worker_name}, {e}") - return None - - if r.status_code != 200: - logger.error(f"Get status fails: {worker_name}, {r}") - return None - - return r.json() - - def remove_worker(self, worker_name: str): - del self.worker_info[worker_name] - - def refresh_all_workers(self): - old_info = dict(self.worker_info) - self.worker_info = {} - - for w_name, w_info in old_info.items(): - if not self.register_worker(w_name, w_info.check_heart_beat, None): - logger.info(f"Remove stale worker: {w_name}") - - def list_models(self): - model_names = set() - - for w_name, w_info in self.worker_info.items(): - model_names.update(w_info.model_names) - - return list(model_names) - - def get_worker_address(self, model_name: str): - if self.dispatch_method == DispatchMethod.LOTTERY: - worker_names = [] - worker_speeds = [] - for w_name, w_info in self.worker_info.items(): - if model_name in w_info.model_names: - worker_names.append(w_name) - worker_speeds.append(w_info.speed) - worker_speeds = np.array(worker_speeds, dtype=np.float32) - norm = np.sum(worker_speeds) - if norm < 1e-4: - return "" - worker_speeds = worker_speeds / norm - if True: # Directly return address - pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds) - worker_name = 
worker_names[pt] - return worker_name - - # Check status before returning - while True: - pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds) - worker_name = worker_names[pt] - - if self.get_worker_status(worker_name): - break - else: - self.remove_worker(worker_name) - worker_speeds[pt] = 0 - norm = np.sum(worker_speeds) - if norm < 1e-4: - return "" - worker_speeds = worker_speeds / norm - continue - return worker_name - elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE: - worker_names = [] - worker_qlen = [] - for w_name, w_info in self.worker_info.items(): - if model_name in w_info.model_names: - worker_names.append(w_name) - worker_qlen.append(w_info.queue_length / w_info.speed) - if len(worker_names) == 0: - return "" - min_index = np.argmin(worker_qlen) - w_name = worker_names[min_index] - self.worker_info[w_name].queue_length += 1 - logger.info( - f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}" - ) - return w_name - else: - raise ValueError(f"Invalid dispatch method: {self.dispatch_method}") - - def receive_heart_beat(self, worker_name: str, queue_length: int): - if worker_name not in self.worker_info: - logger.info(f"Receive unknown heart beat. {worker_name}") - return False - - self.worker_info[worker_name].queue_length = queue_length - self.worker_info[worker_name].last_heart_beat = time.time() - logger.info(f"Receive heart beat. {worker_name}") - return True - - def remove_stable_workers_by_expiration(self): - expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION - to_delete = [] - for worker_name, w_info in self.worker_info.items(): - if w_info.check_heart_beat and w_info.last_heart_beat < expire: - to_delete.append(worker_name) - - for worker_name in to_delete: - self.remove_worker(worker_name) - - def worker_api_generate_stream(self, params): - worker_addr = self.get_worker_address(params["model"]) - if not worker_addr: - logger.info(f"no worker: {params['model']}") - ret = { - "text": server_error_msg, - "error_code": 2, - } - yield json.dumps(ret).encode() + b"\0" - - try: - response = requests.post( - worker_addr + "/worker_generate_stream", - json=params, - stream=True, - timeout=15, - ) - for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"): - if chunk: - yield chunk + b"\0" - except requests.exceptions.RequestException as e: - logger.info(f"worker timeout: {worker_addr}") - ret = { - "text": server_error_msg, - "error_code": 3, - } - yield json.dumps(ret).encode() + b"\0" - - # Let the controller act as a worker to achieve hierarchical - # management. This can be used to connect isolated sub networks. 
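- # A hypothetical sketch (addresses assumed): a parent controller could - # register this controller as if it were a worker, since the - # /worker_get_status route below implements the same contract that real - # workers expose: - # - #     requests.post(parent_controller_addr + "/register_worker", json={ - #         "worker_name": child_controller_addr, - #         "check_heart_beat": False, - #         "worker_status": None, - #     })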
- def worker_api_get_status(self): - model_names = set() - speed = 0 - queue_length = 0 - - for w_name in self.worker_info: - worker_status = self.get_worker_status(w_name) - if worker_status is not None: - model_names.update(worker_status["model_names"]) - speed += worker_status["speed"] - queue_length += worker_status["queue_length"] - - return { - "model_names": list(model_names), - "speed": speed, - "queue_length": queue_length, - } - - -app = FastAPI() - - -@app.post("/register_worker") -async def register_worker(request: Request): - data = await request.json() - controller.register_worker( - data["worker_name"], data["check_heart_beat"], data.get("worker_status", None) - ) - - -@app.post("/refresh_all_workers") -async def refresh_all_workers(): - models = controller.refresh_all_workers() - - -@app.post("/list_models") -async def list_models(): - models = controller.list_models() - return {"models": models} - - -@app.post("/get_worker_address") -async def get_worker_address(request: Request): - data = await request.json() - addr = controller.get_worker_address(data["model"]) - return {"address": addr} - - -@app.post("/receive_heart_beat") -async def receive_heart_beat(request: Request): - data = await request.json() - exist = controller.receive_heart_beat(data["worker_name"], data["queue_length"]) - return {"exist": exist} - - -@app.post("/worker_generate_stream") -async def worker_api_generate_stream(request: Request): - params = await request.json() - generator = controller.worker_api_generate_stream(params) - return StreamingResponse(generator) - - -@app.post("/worker_get_status") -async def worker_api_get_status(request: Request): - return controller.worker_api_get_status() - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--host", type=str, default="localhost") - parser.add_argument("--port", type=int, default=21001) - parser.add_argument( - "--dispatch-method", - type=str, - choices=["lottery", "shortest_queue"], - default="shortest_queue", - ) - args = parser.parse_args() - logger.info(f"args: {args}") - - controller = Controller(args.dispatch_method) - uvicorn.run(app, host=args.host, port=args.port, log_level="info") diff --git a/spaces/Izal887/Konci887/infer_pack/attentions.py b/spaces/Izal887/Konci887/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/Izal887/Konci887/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - 
window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = 
nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
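- # E.g. with window_size=4 the embedding table has 2*4+1 = 9 rows - # (offsets -4..+4). For length=3: pad_length=0 and the slice keeps - # rows 2..6 (offsets -2..+2); for length=10: 5 zero rows are padded on - # each side (9 -> 19) and all 2*10-1 = 19 rows are used (offsets -9..+9).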
- pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so as to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # pad along columns - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's at the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
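- Example: for length=3 each entry is -log(1 + |i - j|), so row 0 - is [0, -log 2, -log 3] ~= [0.0, -0.69, -1.10].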
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Jeff2323/ai-comic-factory/src/lib/sleep.ts b/spaces/Jeff2323/ai-comic-factory/src/lib/sleep.ts deleted file mode 100644 index 2885c6e75c0dc415c9eaf71beabac7461eee5588..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/lib/sleep.ts +++ /dev/null @@ -1,6 +0,0 @@ -export const sleep = async (durationInMs: number) => - new Promise((resolve) => { - setTimeout(() => { - resolve(true) - }, durationInMs) - }) \ No newline at end of file diff --git a/spaces/JeffJing/ZookChatBot/steamship/plugin/tagger.py b/spaces/JeffJing/ZookChatBot/steamship/plugin/tagger.py deleted file mode 100644 index 5dc3fe09687de326fd7422cf99780476af780622..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/plugin/tagger.py +++ /dev/null @@ -1,70 +0,0 @@ -import logging -from abc import ABC, abstractmethod - -from steamship.invocable import InvocableResponse, post -from steamship.invocable.plugin_service import PluginRequest, PluginService, TrainablePluginService -from steamship.plugin.inputs.block_and_tag_plugin_input import BlockAndTagPluginInput -from steamship.plugin.inputs.train_plugin_input import TrainPluginInput -from steamship.plugin.inputs.training_parameter_plugin_input import TrainingParameterPluginInput -from steamship.plugin.outputs.block_and_tag_plugin_output import BlockAndTagPluginOutput -from steamship.plugin.outputs.train_plugin_output import TrainPluginOutput -from steamship.plugin.outputs.training_parameter_plugin_output import TrainingParameterPluginOutput -from steamship.plugin.trainable_model import TrainableModel - -# Note! -# ===== -# -# This is the PLUGIN IMPLEMENTOR's View of a Tagger. -# -# If you are using the Steamship Client, you probably want steamship.client.operations.tagger instead -# of this file. 
-# - - -class Tagger(PluginService[BlockAndTagPluginInput, BlockAndTagPluginOutput], ABC): - @abstractmethod - def run( - self, request: PluginRequest[BlockAndTagPluginInput] - ) -> InvocableResponse[BlockAndTagPluginOutput]: - raise NotImplementedError() - - @post("tag") - def run_endpoint(self, **kwargs) -> InvocableResponse[BlockAndTagPluginOutput]: - """Exposes the Tagger's `run` operation to the Steamship Engine via the expected HTTP path POST /tag""" - return self.run(PluginRequest[BlockAndTagPluginInput].parse_obj(kwargs)) - - -class TrainableTagger(TrainablePluginService[BlockAndTagPluginInput, BlockAndTagPluginOutput], ABC): - @abstractmethod - def run_with_model( - self, request: PluginRequest[BlockAndTagPluginInput], model: TrainableModel - ) -> InvocableResponse[BlockAndTagPluginOutput]: - raise NotImplementedError() - - # noinspection PyUnusedLocal - @post("tag") - def run_endpoint(self, **kwargs) -> InvocableResponse[BlockAndTagPluginOutput]: - """Exposes the Tagger's `run` operation to the Steamship Engine via the expected HTTP path POST /tag""" - return self.run(PluginRequest[BlockAndTagPluginInput].parse_obj(kwargs)) - - # noinspection PyUnusedLocal - @post("getTrainingParameters") - def get_training_parameters_endpoint( - self, **kwargs - ) -> InvocableResponse[TrainingParameterPluginOutput]: - """Exposes the Service's `get_training_parameters` operation to the Steamship Engine via the expected HTTP path POST /getTrainingParameters""" - return self.get_training_parameters(PluginRequest[TrainingParameterPluginInput](**kwargs)) - - # noinspection PyUnusedLocal - @post("train") - def train_endpoint(self, **kwargs) -> InvocableResponse[TrainPluginOutput]: - """Exposes the Service's `train` operation to the Steamship Engine via the expected HTTP path POST /train""" - logging.info(f"Tagger:train_endpoint called. 
Calling train {kwargs}") - arg = PluginRequest[TrainPluginInput].parse_obj(kwargs) - model = self.model_cls()() - model.receive_config(config=self.config) - - if arg.is_status_check: - return self.train_status(arg, model) - else: - return self.train(arg, model) diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT/modules/train_func.py b/spaces/JohnSmith9982/ChuanhuChatGPT/modules/train_func.py deleted file mode 100644 index bc5e2c6aea1f3f28d4bb3f9f4fd2f6d761ba00a2..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT/modules/train_func.py +++ /dev/null @@ -1,161 +0,0 @@ -import os -import logging -import traceback - -import openai -import gradio as gr -import ujson as json -import commentjson -import openpyxl - -import modules.presets as presets -from modules.utils import get_file_hash, count_token -from modules.presets import i18n - -def excel_to_jsonl(filepath, preview=False): - # 打开Excel文件 - workbook = openpyxl.load_workbook(filepath) - - # 获取第一个工作表 - sheet = workbook.active - - # 获取所有行数据 - data = [] - for row in sheet.iter_rows(values_only=True): - data.append(row) - - # 构建字典列表 - headers = data[0] - jsonl = [] - for row in data[1:]: - row_data = dict(zip(headers, row)) - if any(row_data.values()): - jsonl.append(row_data) - formatted_jsonl = [] - for i in jsonl: - if "提问" in i and "答案" in i: - if "系统" in i : - formatted_jsonl.append({ - "messages":[ - {"role": "system", "content": i["系统"]}, - {"role": "user", "content": i["提问"]}, - {"role": "assistant", "content": i["答案"]} - ] - }) - else: - formatted_jsonl.append({ - "messages":[ - {"role": "user", "content": i["提问"]}, - {"role": "assistant", "content": i["答案"]} - ] - }) - else: - logging.warning(f"跳过一行数据,因为没有找到提问和答案: {i}") - return formatted_jsonl - -def jsonl_save_to_disk(jsonl, filepath): - file_hash = get_file_hash(file_paths = [filepath]) - os.makedirs("files", exist_ok=True) - save_path = f"files/{file_hash}.jsonl" - with open(save_path, "w") as f: - f.write("\n".join([json.dumps(i, ensure_ascii=False) for i in jsonl])) - return save_path - -def estimate_cost(ds): - dialogues = [] - for l in ds: - for m in l["messages"]: - dialogues.append(m["content"]) - dialogues = "\n".join(dialogues) - tokens = count_token(dialogues) - return f"Token 数约为 {tokens},预估每轮(epoch)费用约为 {tokens / 1000 * 0.008} 美元。" - - -def handle_dataset_selection(file_src): - logging.info(f"Loading dataset {file_src.name}...") - preview = "" - if file_src.name.endswith(".jsonl"): - with open(file_src.name, "r") as f: - ds = [json.loads(l) for l in f.readlines()] - else: - ds = excel_to_jsonl(file_src.name) - preview = ds[0] - - return preview, gr.update(interactive=True), estimate_cost(ds) - -def upload_to_openai(file_src): - openai.api_key = os.getenv("OPENAI_API_KEY") - dspath = file_src.name - msg = "" - logging.info(f"Uploading dataset {dspath}...") - if dspath.endswith(".xlsx"): - jsonl = excel_to_jsonl(dspath) - dspath = jsonl_save_to_disk(jsonl, dspath) - try: - uploaded = openai.File.create( - file=open(dspath, "rb"), - purpose='fine-tune' - ) - return uploaded.id, f"上传成功" - except Exception as e: - traceback.print_exc() - return "", f"上传失败,原因:{ e }" - -def build_event_description(id, status, trained_tokens, name=i18n("暂时未知")): - # convert to markdown - return f""" - #### 训练任务 {id} - - 模型名称:{name} - - 状态:{status} - - 已经训练了 {trained_tokens} 个token - """ - -def start_training(file_id, suffix, epochs): - openai.api_key = os.getenv("OPENAI_API_KEY") - try: - job = openai.FineTuningJob.create(training_file=file_id, 
model="gpt-3.5-turbo", suffix=suffix, hyperparameters={"n_epochs": epochs}) - return build_event_description(job.id, job.status, job.trained_tokens) - except Exception as e: - traceback.print_exc() - if "is not ready" in str(e): - return "训练出错,因为文件还没准备好。OpenAI 需要一点时间准备文件,过几分钟再来试试。" - return f"训练失败,原因:{ e }" - -def get_training_status(): - openai.api_key = os.getenv("OPENAI_API_KEY") - active_jobs = [build_event_description(job["id"], job["status"], job["trained_tokens"], job["fine_tuned_model"]) for job in openai.FineTuningJob.list(limit=10)["data"] if job["status"] != "cancelled"] - return "\n\n".join(active_jobs), gr.update(interactive=True) if len(active_jobs) > 0 else gr.update(interactive=False) - -def handle_dataset_clear(): - return gr.update(value=None), gr.update(interactive=False) - -def add_to_models(): - openai.api_key = os.getenv("OPENAI_API_KEY") - succeeded_jobs = [job for job in openai.FineTuningJob.list()["data"] if job["status"] == "succeeded"] - extra_models = [job["fine_tuned_model"] for job in succeeded_jobs] - for i in extra_models: - if i not in presets.MODELS: - presets.MODELS.append(i) - - with open('config.json', 'r') as f: - data = commentjson.load(f) - if 'extra_models' in data: - for i in extra_models: - if i not in data['extra_models']: - data['extra_models'].append(i) - else: - data['extra_models'] = extra_models - with open('config.json', 'w') as f: - commentjson.dump(data, f, indent=4) - - return gr.update(choices=presets.MODELS), f"成功添加了 {len(succeeded_jobs)} 个模型。" - -def cancel_all_jobs(): - openai.api_key = os.getenv("OPENAI_API_KEY") - jobs = [job for job in openai.FineTuningJob.list()["data"] if job["status"] not in ["cancelled", "succeeded"]] - for job in jobs: - openai.FineTuningJob.cancel(job["id"]) - return f"成功取消了 {len(jobs)} 个训练任务。" diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/web_assets/stylesheet/markdown.css b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/web_assets/stylesheet/markdown.css deleted file mode 100644 index 6b2215ad0d9284192a8cad21aa79e904aa5e8b16..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/web_assets/stylesheet/markdown.css +++ /dev/null @@ -1,61 +0,0 @@ - -.md-message img{ - border-radius: 10px !important; -} - -/* 表格 */ -.message table { - margin: 1em 0; - border-collapse: collapse; - empty-cells: show; -} -.message td, .message th { - border: 1.2px solid var(--border-color-primary) !important; - padding: 0.2em; -} -.message thead { - background-color: rgba(175,184,193,0.2); -} -.message thead th { - padding: .5em .2em; -} - -/* 行内代码 */ -.message :not(pre) code { - display: inline; - white-space: break-spaces; - font-family: var(--font-mono); - border-radius: 6px; - margin: 0 2px 0 2px; - padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* 代码块 */ -.message pre, -.message pre[class*=language-] { - color: #fff; - overflow-x: auto; - overflow-y: hidden; - margin: .8em 1em 1em 0em !important; - padding: var(--spacing-xl) 1.2em !important; - border-radius: var(--radius-lg) !important; -} -.message pre code, -.message pre code[class*=language-] { - color: #fff; - padding: 0; - margin: 0; - background-color: unset; - text-shadow: none; - font-family: var(--font-mono); -} - - -/* 覆盖prism.css */ -.language-css .token.string, -.style .token.string, -.token.entity, -.token.operator, -.token.url { - background: none !important; -} diff --git a/spaces/KOFTRFU204/AICoverGen/src/rvc.py b/spaces/KOFTRFU204/AICoverGen/src/rvc.py deleted file mode 100644 index 
a2790602462859e4a9885c145a13ff86efba8a3c..0000000000000000000000000000000000000000 --- a/spaces/KOFTRFU204/AICoverGen/src/rvc.py +++ /dev/null @@ -1,166 +0,0 @@ -from multiprocessing import cpu_count -from pathlib import Path - -import torch -from fairseq import checkpoint_utils -from scipy.io import wavfile - -from infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from my_utils import load_audio -from vc_infer_pipeline import VC - -BASE_DIR = Path(__file__).resolve().parent.parent - - -# CPU config -def use_fp32_config(): - for config_file in [ - "32k.json", - "40k.json", - "48k.json", - "48k_v2.json", - "32k_v2.json", - ]: - with open(f"src/configs/{config_file}", "r") as f: - strr = f.read().replace("true", "false") - with open(f"src/configs/{config_file}", "w") as f: - f.write(strr) - -class Config: - def __init__(self, device, is_half): - self.device = device - self.is_half = is_half - self.n_cpu = 2 # number of CPU cores to use - self.gpu_name = None - self.gpu_mem = None - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - if ( - ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) - or "P40" in self.gpu_name.upper() - or "1060" in self.gpu_name - or "1070" in self.gpu_name - or "1080" in self.gpu_name - ): - print("16-series/10-series and P40 GPUs are forced to single precision") - self.is_half = False - for config_file in ["32k.json", "40k.json", "48k.json"]: - with open(BASE_DIR / "src" / "configs" / config_file, "r") as f: - strr = f.read().replace("true", "false") - with open(BASE_DIR / "src" / "configs" / config_file, "w") as f: - f.write(strr) - with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f: - strr = f.read().replace("3.7", "3.0") - with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "w") as f: - f.write(strr) - else: - self.gpu_name = None - self.gpu_mem = int( - torch.cuda.get_device_properties(i_device).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - if self.gpu_mem <= 4: - with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f: - strr = f.read().replace("3.7", "3.0") - with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "w") as f: - f.write(strr) - elif torch.backends.mps.is_available(): - print("No supported NVIDIA GPU found, using MPS for inference") - self.device = "mps" - else: - print("No supported NVIDIA GPU found, using CPU for inference") - self.device = "cpu" - self.is_half = False - use_fp32_config() # CPU config - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # 6G memory config - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # 5G memory config - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem != None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - - return x_pad, x_query, x_center, x_max - - -def load_hubert(device, is_half, model_path): - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([model_path], suffix='', ) - hubert = models[0] - hubert = hubert.to(device) - - if is_half: - hubert = hubert.half() - else: - hubert = hubert.float() - - hubert.eval() - return hubert - - -def get_vc(device, is_half, config, model_path): - cpt = torch.load(model_path, map_location='cpu') 
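- # An RVC checkpoint is a plain dict: "config" holds the synthesizer - # hyper-parameters, "weight" the state dict, and the optional "f0" / - # "version" keys select the model class instantiated below.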
- if "config" not in cpt or "weight" not in cpt: - raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.') - - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) - net_g.eval().to(device) - - if is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - - vc = VC(tgt_sr, config) - return cpt, version, net_g, tgt_sr, vc - - -def rvc_infer(index_path, index_rate, input_path, output_path, pitch_change, f0_method, cpt, version, net_g, filter_radius, tgt_sr, rms_mix_rate, protect, crepe_hop_length, vc, hubert_model): - audio = load_audio(input_path, 16000) - times = [0, 0, 0] - if_f0 = cpt.get('f0', 1) - audio_opt = vc.pipeline(hubert_model, net_g, 0, audio, input_path, times, pitch_change, f0_method, index_path, index_rate, if_f0, filter_radius, tgt_sr, 0, rms_mix_rate, version, protect, crepe_hop_length) - wavfile.write(output_path, tgt_sr, audio_opt) diff --git a/spaces/Kevin676/AutoGPT/autogpt/commands/git_operations.py b/spaces/Kevin676/AutoGPT/autogpt/commands/git_operations.py deleted file mode 100644 index 028f3b8da44c85e01d20ccc5d4a5fa72c759008b..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/autogpt/commands/git_operations.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Git operations for autogpt""" -import git - -from autogpt.config import Config -from autogpt.workspace import path_in_workspace - -CFG = Config() - - -def clone_repository(repo_url: str, clone_path: str) -> str: - """Clone a GitHub repository locally - - Args: - repo_url (str): The URL of the repository to clone - clone_path (str): The path to clone the repository to - - Returns: - str: The result of the clone operation""" - split_url = repo_url.split("//") - auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) - safe_clone_path = path_in_workspace(clone_path) - try: - git.Repo.clone_from(auth_repo_url, safe_clone_path) - return f"""Cloned {repo_url} to {safe_clone_path}""" - except Exception as e: - return f"Error: {str(e)}" diff --git a/spaces/Kevin676/AutoGPT/autogpt/speech/eleven_labs.py b/spaces/Kevin676/AutoGPT/autogpt/speech/eleven_labs.py deleted file mode 100644 index ea84efd8ca9489b40919ecd571813fe954b078e3..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/autogpt/speech/eleven_labs.py +++ /dev/null @@ -1,86 +0,0 @@ -"""ElevenLabs speech module""" -import os - -import requests -from playsound import playsound - -from autogpt.config import Config -from autogpt.speech.base import VoiceBase - -PLACEHOLDERS = {"your-voice-id"} - - -class ElevenLabsSpeech(VoiceBase): - """ElevenLabs speech class""" - - def _setup(self) -> None: - """Set up the voices, API key, etc. 
- - Returns: - None: None - """ - - cfg = Config() - default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"] - voice_options = { - "Rachel": "21m00Tcm4TlvDq8ikWAM", - "Domi": "AZnzlk1XvdvUeBnXmlld", - "Bella": "EXAVITQu4vr4xnSDxMaL", - "Antoni": "ErXwobaYiN019PkySvjV", - "Elli": "MF3mGyEYCl7XYWbV9V6O", - "Josh": "TxGEqnHWrfWFTfGW9XjX", - "Arnold": "VR6AewLTigWG4xSOukaG", - "Adam": "pNInz6obpgDQGcFmaJgB", - "Sam": "yoZ06aMxZJJ28mfd3POQ", - } - self._headers = { - "Content-Type": "application/json", - "xi-api-key": cfg.elevenlabs_api_key, - } - self._voices = default_voices.copy() - if cfg.elevenlabs_voice_1_id in voice_options: - cfg.elevenlabs_voice_1_id = voice_options[cfg.elevenlabs_voice_1_id] - if cfg.elevenlabs_voice_2_id in voice_options: - cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id] - self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0) - self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1) - - def _use_custom_voice(self, voice, voice_index) -> None: - """Use a custom voice if provided and not a placeholder - - Args: - voice (str): The voice ID - voice_index (int): The voice index - - Returns: - None: None - """ - # Placeholder values that should be treated as empty - if voice and voice not in PLACEHOLDERS: - self._voices[voice_index] = voice - - def _speech(self, text: str, voice_index: int = 0) -> bool: - """Speak text using elevenlabs.io's API - - Args: - text (str): The text to speak - voice_index (int, optional): The voice to use. Defaults to 0. - - Returns: - bool: True if the request was successful, False otherwise - """ - tts_url = ( - f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}" - ) - response = requests.post(tts_url, headers=self._headers, json={"text": text}) - - if response.status_code == 200: - with open("speech.mpeg", "wb") as f: - f.write(response.content) - playsound("speech.mpeg", True) - os.remove("speech.mpeg") - return True - else: - print("Request failed with status code:", response.status_code) - print("Response content:", response.content) - return False diff --git a/spaces/KevinQHLin/UniVTG/utils/cpd_auto.py b/spaces/KevinQHLin/UniVTG/utils/cpd_auto.py deleted file mode 100644 index b561b08c4764350843955b510402e3c0a28d62a7..0000000000000000000000000000000000000000 --- a/spaces/KevinQHLin/UniVTG/utils/cpd_auto.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -from .cpd_nonlin import cpd_nonlin - -def cpd_auto(K, ncp, vmax, desc_rate=1, **kwargs): - """Main interface - - Detect change-points, automatically selecting their number - K - kernel between each pair of frames in video - ncp - maximum number of change-points - vmax - penalty strength parameter (see estimate_vmax below) - Optional arguments: - lmin - minimum segment length - lmax - maximum segment length - desc_rate - rate of descriptor sampling (vmax always corresponds to 1x) - - Note: - - cps are always calculated in subsampled coordinates irrespective of - desc_rate - - lmin and m should be in agreement - --- - Returns: (cps, costs) - cps - best selected change-points - costs - costs for 0,1,2,...,m change-points - - Memory requirement: ~ (3*N*N + N*ncp)*4 bytes ~= 16 * N^2 bytes - That is 1.6 GB for N=10000. 
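- - The selected number of change-points minimizes the penalized cost - computed below: - cost(m) = score(m)/N + (vmax*m / (2*N2)) * (log(N2/m) + 1) - where N2 = N*desc_rate is the pre-subsampling length.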
- """ - m = ncp - (_, scores) = cpd_nonlin(K, m, backtrack=False, **kwargs) - # print("scores ",scores) - - N = K.shape[0] - N2 = N*desc_rate # length of the video before subsampling - - penalties = np.zeros(m+1) - # Prevent division by zero (in case of 0 changes) - ncp = np.arange(1, m+1) - penalties[1:] = (vmax*ncp/(2.0*N2))*(np.log(float(N2)/ncp)+1) - - costs = scores/float(N) + penalties - m_best = np.argmin(costs) - # print("cost ",costs) - # print("m_best ",m_best) - (cps, scores2) = cpd_nonlin(K, m_best, **kwargs) - - return (cps, costs) - - -# ------------------------------------------------------------------------------ -# Extra functions (currently not used) - -def estimate_vmax(K_stable): - """K_stable - kernel between all frames of a stable segment""" - n = K_stable.shape[0] - vmax = np.trace(centering(K_stable)/n) - return vmax - - -def centering(K): - """Apply kernel centering""" - mean_rows = np.mean(K, 1)[:, np.newaxis] - return K - mean_rows - mean_rows.T + np.mean(mean_rows) - - -def eval_score(K, cps): - """ Evaluate unnormalized empirical score - (sum of kernelized scatters) for the given change-points """ - N = K.shape[0] - cps = [0] + list(cps) + [N] - V1 = 0 - V2 = 0 - for i in range(len(cps)-1): - K_sub = K[cps[i]:cps[i+1], :][:, cps[i]:cps[i+1]] - V1 += np.sum(np.diag(K_sub)) - V2 += np.sum(K_sub) / float(cps[i+1] - cps[i]) - return (V1 - V2) - - -def eval_cost(K, cps, score, vmax): - """ Evaluate cost function for automatic number of change points selection - K - kernel between all frames - cps - selected change-points - score - unnormalized empirical score (sum of kernelized scatters) - vmax - vmax parameter""" - - N = K.shape[0] - penalty = (vmax*len(cps)/(2.0*N))*(np.log(float(N)/len(cps))+1) - return score/float(N) + penalty - diff --git a/spaces/Kimata/Sanskrit-TTS/attentions.py b/spaces/Kimata/Sanskrit-TTS/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask 
- return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - 
k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). 
- x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Kreaols/ChuanhuChatGPT/modules/webui_locale.py b/spaces/Kreaols/ChuanhuChatGPT/modules/webui_locale.py deleted file mode 100644 index 1ce4d97b9b41cbb2d9be3fdadc4c85f6ef897604..0000000000000000000000000000000000000000 --- a/spaces/Kreaols/ChuanhuChatGPT/modules/webui_locale.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import locale -import commentjson as json - -class I18nAuto: - def __init__(self): - if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) - else: - config = {} - lang_config = config.get("language", "auto") - language = os.environ.get("LANGUAGE", lang_config) - if language == "auto": - language = locale.getdefaultlocale()[0] # get the language code of the system (ex. 
zh_CN) - self.language_map = {} - self.file_is_exists = os.path.isfile(f"./locale/{language}.json") - if self.file_is_exists: - with open(f"./locale/{language}.json", "r", encoding="utf-8") as f: - self.language_map.update(json.load(f)) - - def __call__(self, key): - if self.file_is_exists and key in self.language_map: - return self.language_map[key] - else: - return key diff --git a/spaces/KyanChen/RSPrompter/README.md b/spaces/KyanChen/RSPrompter/README.md deleted file mode 100644 index f52516a7176601fb721585139b0c7f9fdb31d043..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: RSPrompter -emoji: 📚 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.35.2 -app_file: App_main.py -pinned: false ---- - -Arxiv: https://arxiv.org/abs/2306.16269 - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/fcos.py b/spaces/KyanChen/RSPrompter/mmdet/models/detectors/fcos.py deleted file mode 100644 index c628059313ac80644ec2ba2c806e7baf2e418a41..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/fcos.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmdet.registry import MODELS -from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig -from .single_stage import SingleStageDetector - - -@MODELS.register_module() -class FCOS(SingleStageDetector): - """Implementation of `FCOS `_ - - Args: - backbone (:obj:`ConfigDict` or dict): The backbone config. - neck (:obj:`ConfigDict` or dict): The neck config. - bbox_head (:obj:`ConfigDict` or dict): The bbox head config. - train_cfg (:obj:`ConfigDict` or dict, optional): The training config - of FCOS. Defaults to None. - test_cfg (:obj:`ConfigDict` or dict, optional): The testing config - of FCOS. Defaults to None. - data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of - :class:`DetDataPreprocessor` to process the input data. - Defaults to None. - init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or - list[dict], optional): Initialization config dict. - Defaults to None. - """ - - def __init__(self, - backbone: ConfigType, - neck: ConfigType, - bbox_head: ConfigType, - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - data_preprocessor: OptConfigType = None, - init_cfg: OptMultiConfig = None) -> None: - super().__init__( - backbone=backbone, - neck=neck, - bbox_head=bbox_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - data_preprocessor=data_preprocessor, - init_cfg=init_cfg) diff --git a/spaces/KyanChen/RSPrompter/mmdet/utils/setup_env.py b/spaces/KyanChen/RSPrompter/mmdet/utils/setup_env.py deleted file mode 100644 index a7b37845a883752a1659fabf62c7404cff971191..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/utils/setup_env.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import datetime -import logging -import os -import platform -import warnings - -import cv2 -import torch.multiprocessing as mp -from mmengine import DefaultScope -from mmengine.logging import print_log -from mmengine.utils import digit_version - - -def setup_cache_size_limit_of_dynamo(): - """Setup cache size limit of dynamo. 
- - Note: Due to the dynamic shape of the loss calculation and - post-processing parts in the object detection algorithm, these - functions must be compiled every time they are run. - Setting a large value for torch._dynamo.config.cache_size_limit - may result in repeated compilation, which can slow down training - and testing speed. Therefore, we need to set the default value of - cache_size_limit smaller. An empirical value is 4. - """ - - import torch - if digit_version(torch.__version__) >= digit_version('2.0.0'): - if 'DYNAMO_CACHE_SIZE_LIMIT' in os.environ: - import torch._dynamo - cache_size_limit = int(os.environ['DYNAMO_CACHE_SIZE_LIMIT']) - torch._dynamo.config.cache_size_limit = cache_size_limit - print_log( - f'torch._dynamo.config.cache_size_limit is force ' - f'set to {cache_size_limit}.', - logger='current', - level=logging.WARNING) - - -def setup_multi_processes(cfg): - """Setup multi-processing environment variables.""" - # set multi-process start method as `fork` to speed up the training - if platform.system() != 'Windows': - mp_start_method = cfg.get('mp_start_method', 'fork') - current_method = mp.get_start_method(allow_none=True) - if current_method is not None and current_method != mp_start_method: - warnings.warn( - f'Multi-processing start method `{mp_start_method}` is ' - f'different from the previous setting `{current_method}`.' - f'It will be force set to `{mp_start_method}`. You can change ' - f'this behavior by changing `mp_start_method` in your config.') - mp.set_start_method(mp_start_method, force=True) - - # disable opencv multithreading to avoid system being overloaded - opencv_num_threads = cfg.get('opencv_num_threads', 0) - cv2.setNumThreads(opencv_num_threads) - - # setup OMP threads - # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa - workers_per_gpu = cfg.data.get('workers_per_gpu', 1) - if 'train_dataloader' in cfg.data: - workers_per_gpu = \ - max(cfg.data.train_dataloader.get('workers_per_gpu', 1), - workers_per_gpu) - - if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1: - omp_num_threads = 1 - warnings.warn( - f'Setting OMP_NUM_THREADS environment variable for each process ' - f'to be {omp_num_threads} in default, to avoid your system being ' - f'overloaded, please further tune the variable for optimal ' - f'performance in your application as needed.') - os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) - - # setup MKL threads - if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1: - mkl_num_threads = 1 - warnings.warn( - f'Setting MKL_NUM_THREADS environment variable for each process ' - f'to be {mkl_num_threads} in default, to avoid your system being ' - f'overloaded, please further tune the variable for optimal ' - f'performance in your application as needed.') - os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) - - -def register_all_modules(init_default_scope: bool = True) -> None: - """Register all modules in mmdet into the registries. - - Args: - init_default_scope (bool): Whether initialize the mmdet default scope. - When `init_default_scope=True`, the global default scope will be - set to `mmdet`, and all registries will build modules from mmdet's - registry node. To understand more about the registry, please refer - to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md - Defaults to True. 
- """ # noqa - import mmdet.datasets # noqa: F401,F403 - import mmdet.engine # noqa: F401,F403 - import mmdet.evaluation # noqa: F401,F403 - import mmdet.models # noqa: F401,F403 - import mmdet.visualization # noqa: F401,F403 - - if init_default_scope: - never_created = DefaultScope.get_current_instance() is None \ - or not DefaultScope.check_instance_created('mmdet') - if never_created: - DefaultScope.get_instance('mmdet', scope_name='mmdet') - return - current_scope = DefaultScope.get_current_instance() - if current_scope.scope_name != 'mmdet': - warnings.warn('The current default scope ' - f'"{current_scope.scope_name}" is not "mmdet", ' - '`register_all_modules` will force the current' - 'default scope to be "mmdet". If this is not ' - 'expected, please set `init_default_scope=False`.') - # avoid name conflict - new_instance_name = f'mmdet-{datetime.datetime.now()}' - DefaultScope.get_instance(new_instance_name, scope_name='mmdet') diff --git a/spaces/KyanChen/RSPrompter/mmpl/models/heads/sam_instance_head.py b/spaces/KyanChen/RSPrompter/mmpl/models/heads/sam_instance_head.py deleted file mode 100644 index 36bee5bb870f3b23fc9d90885f57ac694e940465..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/models/heads/sam_instance_head.py +++ /dev/null @@ -1,1015 +0,0 @@ -import copy -import warnings -from typing import List, Optional, Tuple, Union, Dict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmengine import ConfigDict -from mmengine.structures import InstanceData -from torch import Tensor - -from mmdet.models import BaseDetector, TwoStageDetector, StandardRoIHead, SinePositionalEncoding, FCNMaskHead, \ - BaseRoIHead -from mmdet.models.task_modules import SamplingResult -from mmdet.models.utils import multi_apply, unpack_gt_instances, empty_instances -from mmdet.structures import SampleList, DetDataSample -from mmdet.structures.bbox import bbox2roi -from mmdet.structures.mask import mask_target -from mmdet.utils import InstanceList, reduce_mean, OptMultiConfig -from mmpl.registry import MODELS, TASK_UTILS -from mmengine.model import BaseModel, BaseModule -from einops import rearrange, repeat -from mmpl.utils import ConfigType, OptConfigType -from mmdet.models.dense_heads import Mask2FormerHead -from mmdet.models.dense_heads.anchor_free_head import AnchorFreeHead - -@MODELS.register_module() -class SAMInstanceHead(Mask2FormerHead): - def __init__( - self, - num_things_classes: int = 1, - num_stuff_classes: int = 0, - prompt_neck: ConfigType = ..., - with_iou: bool = False, - with_multiscale: bool = False, - with_sincos: bool = False, - with_res_imgfeat: bool = False, - loss_cls: ConfigType = dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=2.0, - reduction='mean', - class_weight=[1.0] * 133 + [0.1]), - loss_mask: ConfigType = dict( - type='CrossEntropyLoss', - use_sigmoid=True, - reduction='mean', - loss_weight=5.0), - loss_dice: ConfigType = dict( - type='DiceLoss', - use_sigmoid=True, - activate=True, - reduction='mean', - naive_dice=True, - eps=1.0, - loss_weight=5.0), - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - init_cfg: OptMultiConfig = None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='ReLU', inplace=True), - **kwargs - ): - super(AnchorFreeHead, self).__init__(init_cfg=init_cfg) - - self.num_things_classes = num_things_classes - self.num_stuff_classes = num_stuff_classes - self.num_classes = 
self.num_things_classes + self.num_stuff_classes - self.with_iou = with_iou - self.with_multiscale = with_multiscale - self.with_sincos = with_sincos - self.with_res_imgfeat = with_res_imgfeat - - # self.num_transformer_feat_level = num_transformer_feat_level - # self.num_heads = transformer_decoder.layer_cfg.cross_attn_cfg.num_heads - # self.num_transformer_decoder_layers = transformer_decoder.num_layers - # assert pixel_decoder.encoder.layer_cfg. \ - # self_attn_cfg.num_levels == num_transformer_feat_level - # pixel_decoder_ = copy.deepcopy(pixel_decoder) - # pixel_decoder_.update( - # in_channels=in_channels, - # feat_channels=feat_channels, - # out_channels=out_channels) - # self.pixel_decoder = MODELS.build(pixel_decoder_) - # self.transformer_decoder = Mask2FormerTransformerDecoder( - # **transformer_decoder) - # self.decoder_embed_dims = self.transformer_decoder.embed_dims - # - # self.decoder_input_projs = ModuleList() - # # from low resolution to high resolution - # for _ in range(num_transformer_feat_level): - # if (self.decoder_embed_dims != feat_channels - # or enforce_decoder_input_project): - # self.decoder_input_projs.append( - # Conv2d( - # feat_channels, self.decoder_embed_dims, kernel_size=1)) - # else: - # self.decoder_input_projs.append(nn.Identity()) - # self.decoder_positional_encoding = SinePositionalEncoding( - # **positional_encoding) - # self.query_embed = nn.Embedding(self.num_queries, feat_channels) - # self.query_feat = nn.Embedding(self.num_queries, feat_channels) - # # from low resolution to high resolution - # self.level_embed = nn.Embedding(self.num_transformer_feat_level, - # feat_channels) - # - # self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) - # self.mask_embed = nn.Sequential( - # nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), - # nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), - # nn.Linear(feat_channels, out_channels)) - - self.prompt_neck = MODELS.build(prompt_neck) - self.num_queries = self.prompt_neck.num_queries - self.per_query_point = self.prompt_neck.per_query_point - out_channels = self.prompt_neck.out_channels - - self.cls_embed = nn.Sequential( - nn.Linear(out_channels, out_channels // 2), - nn.ReLU(inplace=True), - nn.Linear(out_channels // 2, self.num_classes + 1) - ) - - if self.with_sincos: - self.point_emb = nn.Sequential( - nn.Linear(out_channels, out_channels), - nn.ReLU(inplace=True), - nn.Linear(out_channels, out_channels), - nn.ReLU(inplace=True), - nn.Linear(out_channels, self.per_query_point * out_channels*2) - ) - else: - self.point_emb = nn.Sequential( - nn.Linear(out_channels, out_channels), - nn.ReLU(inplace=True), - nn.Linear(out_channels, out_channels), - nn.ReLU(inplace=True), - nn.Linear(out_channels, self.per_query_point * out_channels) - ) - - if self.with_res_imgfeat: - self.res_imgfeat = nn.Sequential( - nn.UpsamplingBilinear2d(scale_factor=2), - ConvModule( - out_channels, - out_channels, - kernel_size=3, - padding=1, - norm_cfg=norm_cfg, - act_cfg=act_cfg - ) - ) - - self.test_cfg = test_cfg - self.train_cfg = train_cfg - if train_cfg: - self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) - self.sampler = TASK_UTILS.build( - self.train_cfg['sampler'], default_args=dict(context=self)) - self.num_points = self.train_cfg.get('num_points', 12544) - self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0) - self.importance_sample_ratio = self.train_cfg.get( - 'importance_sample_ratio', 0.75) - - self.class_weight = loss_cls.class_weight - 
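When `with_sincos` is set, the `point_emb` MLP defined above emits `2 * out_channels` values per prompt point; the forward pass later folds the even channels through `sin` and adds the odd channels, restoring the original width. A minimal shape check of that folding (toy sizes, assumed for illustration only):

```python
import torch

batch, num_queries, per_query_point, channels = 2, 10, 5, 256

# Doubled-width MLP output, as produced by the with_sincos branch.
point_emb = torch.randn(batch, num_queries, per_query_point, channels * 2)

# Even channels pass through sin(); odd channels stay linear.
# Their sum halves the last dimension back to `channels`.
folded = torch.sin(point_emb[..., ::2]) + point_emb[..., 1::2]

assert folded.shape == (batch, num_queries, per_query_point, channels)
```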
self.loss_cls = MODELS.build(loss_cls) - self.loss_mask = MODELS.build(loss_mask) - self.loss_dice = MODELS.build(loss_dice) - - def forward(self, x: List[Tensor], - batch_data_samples: SampleList, - sam - ) -> Tuple[List[Tensor]]: - """Forward function. - - Args: - x (list[Tensor]): Multi scale Features from the - upstream network, each is a 4D-tensor. - batch_data_samples (List[:obj:`DetDataSample`]): The Data - Samples. It usually includes information such as - `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. - - Returns: - tuple[list[Tensor]]: A tuple contains two elements. - - - cls_pred_list (list[Tensor)]: Classification logits \ - for each decoder layer. Each is a 3D-tensor with shape \ - (batch_size, num_queries, cls_out_channels). \ - Note `cls_out_channels` should includes background. - - mask_pred_list (list[Tensor]): Mask logits for each \ - decoder layer. Each with shape (batch_size, num_queries, \ - h, w). - """ - batch_img_metas = [ - data_sample.metainfo for data_sample in batch_data_samples - ] - batch_size = len(batch_img_metas) - decoder_out, query_feat_list, res_img_feat = self.prompt_neck(x) - - if self.with_multiscale: - cls_pred_list = [self.cls_embed(query_feat) for query_feat in query_feat_list] - else: - # shape (batch_size, num_queries, c) - cls_pred_list = [self.cls_embed(decoder_out)] - # shape (batch_size, num_queries, c) - point_emb = self.point_emb(decoder_out) - # shape (batch_size, num_queries, per_query_point, c) - point_emb = point_emb.view(batch_size, self.num_queries, self.per_query_point, -1) - - img_seg_feat = x[0] - point_emb = rearrange(point_emb, 'b n p c -> (b n) p c') - if self.with_sincos: - point_emb = torch.sin(point_emb[..., ::2]) + point_emb[..., 1::2] - - nomask_dense_embeddings = sam.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( - point_emb.shape[0], -1, *img_seg_feat.shape[-2:] - ) - - img_embeddings = torch.repeat_interleave(img_seg_feat, self.num_queries, dim=0) - img_pe = sam.prompt_encoder.get_dense_pe() - img_pe = repeat(img_pe, 'b c h w -> (b n) c h w', n=img_embeddings.shape[0]) - - if self.with_res_imgfeat: - res_img_feat = self.res_imgfeat(res_img_feat) - res_img_feat = torch.repeat_interleave(res_img_feat, self.num_queries, dim=0) - else: - res_img_feat = None - - low_res_masks, iou_predictions = sam.mask_decoder.forward_batch( - image_embeddings=img_embeddings, - image_pe=img_pe, - sparse_prompt_embeddings=point_emb, - dense_prompt_embeddings=nomask_dense_embeddings, - multimask_output=False, - res_img_feat=res_img_feat, - ) - mask_pred = rearrange(low_res_masks.squeeze(1), '(b n) h w -> b n h w', b=batch_size) - - # optional - # if self.with_iou: - # iou_predictions = iou_predictions.view(batch_size, self.num_queries, -1) - # cls_pred = cls_pred * iou_predictions - - if self.with_multiscale: - mask_pred_list = [mask_pred] * len(cls_pred_list) - else: - mask_pred_list = [mask_pred] - - return cls_pred_list, mask_pred_list - - def predict(self, x: Tuple[Tensor], - batch_data_samples: SampleList, - sam - ) -> Tuple[Tensor]: - """Test without augmentaton. - - Args: - x (tuple[Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - batch_data_samples (List[:obj:`DetDataSample`]): The Data - Samples. It usually includes information such as - `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. - - Returns: - tuple[Tensor]: A tuple contains two tensors. - - - mask_cls_results (Tensor): Mask classification logits,\ - shape (batch_size, num_queries, cls_out_channels). 
- Note `cls_out_channels` should includes background. - - mask_pred_results (Tensor): Mask logits, shape \ - (batch_size, num_queries, h, w). - """ - batch_img_metas = [ - data_sample.metainfo for data_sample in batch_data_samples - ] - all_cls_scores, all_mask_preds = self(x, batch_data_samples, sam) - mask_cls_results = all_cls_scores[-1] - mask_pred_results = all_mask_preds[-1] - - # upsample masks - img_shape = batch_img_metas[0]['batch_input_shape'] - mask_pred_results = F.interpolate( - mask_pred_results, - size=(img_shape[0], img_shape[1]), - mode='bilinear', - align_corners=False) - - return mask_cls_results, mask_pred_results - - def loss( - self, - x: Tuple[Tensor], - batch_data_samples: SampleList, - sam, - ) -> Dict[str, Tensor]: - """Perform forward propagation and loss calculation of the panoptic - head on the features of the upstream network. - - Args: - x (tuple[Tensor]): Multi-level features from the upstream - network, each is a 4D-tensor. - batch_data_samples (List[:obj:`DetDataSample`]): The Data - Samples. It usually includes information such as - `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - batch_img_metas = [] - batch_gt_instances = [] - batch_gt_semantic_segs = [] - for data_sample in batch_data_samples: - batch_img_metas.append(data_sample.metainfo) - batch_gt_instances.append(data_sample.gt_instances) - if 'gt_sem_seg' in data_sample: - batch_gt_semantic_segs.append(data_sample.gt_sem_seg) - else: - batch_gt_semantic_segs.append(None) - - # forward - all_cls_scores, all_mask_preds = self(x, batch_data_samples, sam) - - # preprocess ground truth - batch_gt_instances = self.preprocess_gt(batch_gt_instances, - batch_gt_semantic_segs) - - # loss - losses = self.loss_by_feat(all_cls_scores, all_mask_preds, - batch_gt_instances, batch_img_metas) - - return losses - - -@MODELS.register_module() -class SAMAnchorInstanceHead(TwoStageDetector): - def __init__( - self, - sam_head=True, - neck: OptConfigType = None, - rpn_head: OptConfigType = None, - roi_head: OptConfigType = None, - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - **kwargs - ): - super(TwoStageDetector, self).__init__() - self.neck = MODELS.build(neck) - self.sam_head = sam_head - - if rpn_head is not None: - rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None - rpn_head_ = rpn_head.copy() - rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn) - rpn_head_num_classes = rpn_head_.get('num_classes', None) - if rpn_head_num_classes is None: - rpn_head_.update(num_classes=1) - else: - if rpn_head_num_classes != 1: - warnings.warn( - 'The `num_classes` should be 1 in RPN, but get ' - f'{rpn_head_num_classes}, please set ' - 'rpn_head.num_classes = 1 in your config file.') - rpn_head_.update(num_classes=1) - self.rpn_head = MODELS.build(rpn_head_) - - if roi_head is not None: - # update train and test cfg here for now - # TODO: refactor assigner & sampler - rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None - roi_head.update(train_cfg=rcnn_train_cfg) - roi_head.update(test_cfg=test_cfg.rcnn) - self.roi_head = MODELS.build(roi_head) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - def extract_feat(self, x): - x = self.neck(x) - return x - - def loss(self, - batch_inputs, - batch_data_samples: SampleList, - sam - ) -> dict: - """Calculate losses from a batch of inputs and data samples. - - Args: - batch_inputs (Tensor): Input images of shape (N, C, H, W). 
- These should usually be mean centered and std scaled. - batch_data_samples (List[:obj:`DetDataSample`]): The batch - data samples. It usually includes information such - as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. - - Returns: - dict: A dictionary of loss components - """ - x = self.extract_feat(batch_inputs) - img_seg_feat = batch_inputs[0] - losses = dict() - - # RPN forward and loss - if self.with_rpn: - proposal_cfg = self.train_cfg.get('rpn_proposal', - self.test_cfg.rpn) - rpn_data_samples = copy.deepcopy(batch_data_samples) - # set cat_id of gt_labels to 0 in RPN - for data_sample in rpn_data_samples: - data_sample.gt_instances.labels = \ - torch.zeros_like(data_sample.gt_instances.labels) - - rpn_losses, rpn_results_list = self.rpn_head.loss_and_predict( - x, rpn_data_samples, proposal_cfg=proposal_cfg) - # avoid get same name with roi_head loss - keys = rpn_losses.keys() - for key in list(keys): - if 'loss' in key and 'rpn' not in key: - rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key) - losses.update(rpn_losses) - else: - assert batch_data_samples[0].get('proposals', None) is not None - # use pre-defined proposals in InstanceData for the second stage - # to extract ROI features. - rpn_results_list = [ - data_sample.proposals for data_sample in batch_data_samples - ] - if self.sam_head: - roi_losses = self.roi_head.loss(x, rpn_results_list, - batch_data_samples, - sam, img_seg_feat - ) - else: - roi_losses = self.roi_head.loss(x, rpn_results_list, - batch_data_samples - ) - losses.update(roi_losses) - - return losses - - def predict(self, - batch_inputs: Tensor, - batch_data_samples: SampleList, - sam, - rescale: bool = True - ) -> SampleList: - """Predict results from a batch of inputs and data samples with post- - processing. - - Args: - batch_inputs (Tensor): Inputs with shape (N, C, H, W). - batch_data_samples (List[:obj:`DetDataSample`]): The Data - Samples. It usually includes information such as - `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. - rescale (bool): Whether to rescale the results. - Defaults to True. - - Returns: - list[:obj:`DetDataSample`]: Return the detection results of the - input images. The returns value is DetDataSample, - which usually contain 'pred_instances'. And the - ``pred_instances`` usually contains following keys. - - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). - - bboxes (Tensor): Has a shape (num_instances, 4), - the last dimension 4 arrange as (x1, y1, x2, y2). - - masks (Tensor): Has a shape (num_instances, H, W). - """ - - assert self.with_bbox, 'Bbox head must be implemented.' 
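The key-renaming loop in `loss` above exists so that RPN losses cannot clobber identically named RoI-head losses when both are merged into one dict. An isolated illustration with made-up keys and values:

```python
rpn_losses = {'loss_cls': 0.7, 'loss_bbox': 0.3, 'rpn_loss_obj': 0.1}

# Prefix every loss key that does not already mention the RPN, so a later
# losses.update(...) cannot overwrite the RoI head's own 'loss_cls'/'loss_bbox'.
for key in list(rpn_losses.keys()):
    if 'loss' in key and 'rpn' not in key:
        rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)

print(rpn_losses)  # {'rpn_loss_obj': 0.1, 'rpn_loss_cls': 0.7, 'rpn_loss_bbox': 0.3}
```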
- x = self.extract_feat(batch_inputs) - img_seg_feat = batch_inputs[0] - - # If there are no pre-defined proposals, use RPN to get proposals - if batch_data_samples[0].get('proposals', None) is None: - rpn_results_list = self.rpn_head.predict( - x, batch_data_samples, rescale=False) - else: - rpn_results_list = [ - data_sample.proposals for data_sample in batch_data_samples - ] - if self.sam_head: - results_list = self.roi_head.predict( - x, rpn_results_list, batch_data_samples, sam, img_seg_feat, rescale=rescale) - else: - results_list = self.roi_head.predict( - x, rpn_results_list, batch_data_samples, rescale=rescale) - - batch_data_samples = self.add_pred_to_datasample( - batch_data_samples, results_list) - return batch_data_samples - - -@MODELS.register_module() -class SAMAnchorPromptRoIHead(StandardRoIHead): - def __init__( - self, - positional_encoding=dict(num_feats=128, normalize=True), - *args, - **kwargs - ): - super(StandardRoIHead, self).__init__(*args, **kwargs) - self.generator_pe = SinePositionalEncoding(**positional_encoding) - - def _mask_forward(self, - x: Tuple[Tensor], - rois: Tensor = None, - pos_inds: Optional[Tensor] = None, - bbox_feats: Optional[Tensor] = None, - sam=None, img_seg_feat=None - ) -> dict: - """Mask head forward function used in both training and testing. - - Args: - x (tuple[Tensor]): Tuple of multi-level img features. - rois (Tensor): RoIs with the shape (n, 5) where the first - column indicates batch id of each RoI. - pos_inds (Tensor, optional): Indices of positive samples. - Defaults to None. - bbox_feats (Tensor): Extract bbox RoI features. Defaults to None. - - Returns: - dict[str, Tensor]: Usually returns a dictionary with keys: - - - `mask_preds` (Tensor): Mask prediction. - - `mask_feats` (Tensor): Extract mask RoI features. - """ - assert ((rois is not None) ^ - (pos_inds is not None and bbox_feats is not None)) - if rois is not None: - mask_feats = self.mask_roi_extractor( - x[:self.mask_roi_extractor.num_inputs], rois) - if self.with_shared_head: - mask_feats = self.shared_head(mask_feats) - else: - assert bbox_feats is not None - mask_feats = bbox_feats[pos_inds] - - mask_preds = self.mask_head(mask_feats, sam, img_seg_feat, img_flag_ids=rois[:, 0]) - mask_results = dict(mask_preds=mask_preds[0], mask_iou=mask_preds[1], mask_feats=mask_feats) - return mask_results - - def mask_loss(self, x: Tuple[Tensor], - sampling_results: List[SamplingResult], bbox_feats: Tensor, - batch_gt_instances: InstanceList, - sam, img_seg_feat - ) -> dict: - """Perform forward propagation and loss calculation of the mask head on - the features of the upstream network. - - Args: - x (tuple[Tensor]): Tuple of multi-level img features. - sampling_results (list["obj:`SamplingResult`]): Sampling results. - bbox_feats (Tensor): Extract bbox RoI features. - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes``, ``labels``, and - ``masks`` attributes. - - Returns: - dict: Usually returns a dictionary with keys: - - - `mask_preds` (Tensor): Mask prediction. - - `mask_feats` (Tensor): Extract mask RoI features. - - `mask_targets` (Tensor): Mask target of each positive\ - proposals in the image. - - `loss_mask` (dict): A dictionary of mask loss components. 
- """ - if not self.share_roi_extractor: - pos_rois = bbox2roi([res.pos_priors for res in sampling_results]) - mask_results = self._mask_forward( - x, pos_rois, sam=sam, img_seg_feat=img_seg_feat) - else: - pos_inds = [] - device = bbox_feats.device - for res in sampling_results: - pos_inds.append( - torch.ones( - res.pos_priors.shape[0], - device=device, - dtype=torch.uint8)) - pos_inds.append( - torch.zeros( - res.neg_priors.shape[0], - device=device, - dtype=torch.uint8)) - pos_inds = torch.cat(pos_inds) - - mask_results = self._mask_forward( - x, pos_inds=pos_inds, bbox_feats=bbox_feats) - - mask_loss_and_target = self.mask_head.loss_and_target( - mask_preds=mask_results['mask_preds'], - sampling_results=sampling_results, - batch_gt_instances=batch_gt_instances, - rcnn_train_cfg=self.train_cfg) - - mask_results.update(loss_mask=mask_loss_and_target['loss_mask']) - return mask_results - - def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, - batch_data_samples: List[DetDataSample], - sam, img_seg_feat - ) -> dict: - """Perform forward propagation and loss calculation of the detection - roi on the features of the upstream network. - - Args: - x (tuple[Tensor]): List of multi-level img features. - rpn_results_list (list[:obj:`InstanceData`]): List of region - proposals. - batch_data_samples (list[:obj:`DetDataSample`]): The batch - data samples. It usually includes information such - as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. - - Returns: - dict[str, Tensor]: A dictionary of loss components - """ - x = list(x) - bs, _, h, w = x[-1].shape - mask_pe = torch.zeros((bs, h, w), device=x[0].device, dtype=torch.bool) - img_feats_pe = self.generator_pe(mask_pe) - for i in range(len(x)): - x[i] = x[i] + torch.nn.functional.interpolate(img_feats_pe, size=x[i].shape[-2:], mode='bilinear') - - assert len(rpn_results_list) == len(batch_data_samples) - outputs = unpack_gt_instances(batch_data_samples) - batch_gt_instances, batch_gt_instances_ignore, _ = outputs - - # assign gts and sample proposals - num_imgs = len(batch_data_samples) - sampling_results = [] - for i in range(num_imgs): - # rename rpn_results.bboxes to rpn_results.priors - rpn_results = rpn_results_list[i] - rpn_results.priors = rpn_results.pop('bboxes') - - assign_result = self.bbox_assigner.assign( - rpn_results, batch_gt_instances[i], - batch_gt_instances_ignore[i]) - sampling_result = self.bbox_sampler.sample( - assign_result, - rpn_results, - batch_gt_instances[i], - feats=[lvl_feat[i][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - - losses = dict() - # bbox head loss - if self.with_bbox: - bbox_results = self.bbox_loss(x, sampling_results) - losses.update(bbox_results['loss_bbox']) - - # mask head forward and loss - if self.with_mask: - mask_results = self.mask_loss(x, sampling_results, - bbox_results['bbox_feats'], - batch_gt_instances, - sam, img_seg_feat - ) - losses.update(mask_results['loss_mask']) - - return losses - - - def predict_mask(self, - x: Tuple[Tensor], - batch_img_metas: List[dict], - results_list: InstanceList, - rescale: bool = False, - sam=None, img_seg_feat=None - ) -> InstanceList: - """Perform forward propagation of the mask head and predict detection - results on the features of the upstream network. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - batch_img_metas (list[dict]): List of image information. - results_list (list[:obj:`InstanceData`]): Detection results of - each image. 
- rescale (bool): If True, return boxes in original image space. - Defaults to False. - - Returns: - list[:obj:`InstanceData`]: Detection results of each image - after the post process. - Each item usually contains following keys. - - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). - - bboxes (Tensor): Has a shape (num_instances, 4), - the last dimension 4 arrange as (x1, y1, x2, y2). - - masks (Tensor): Has a shape (num_instances, H, W). - """ - # don't need to consider aug_test. - bboxes = [res.bboxes for res in results_list] - mask_rois = bbox2roi(bboxes) - if mask_rois.shape[0] == 0: - results_list = empty_instances( - batch_img_metas, - mask_rois.device, - task_type='mask', - instance_results=results_list, - mask_thr_binary=self.test_cfg.mask_thr_binary) - return results_list - - mask_results = self._mask_forward(x, mask_rois, sam=sam, img_seg_feat=img_seg_feat) - mask_preds = mask_results['mask_preds'] - # split batch mask prediction back to each image - num_mask_rois_per_img = [len(res) for res in results_list] - mask_preds = mask_preds.split(num_mask_rois_per_img, 0) - - # TODO: Handle the case where rescale is false - results_list = self.mask_head.predict_by_feat( - mask_preds=mask_preds, - results_list=results_list, - batch_img_metas=batch_img_metas, - rcnn_test_cfg=self.test_cfg, - rescale=rescale) - return results_list - - def predict(self, - x: Tuple[Tensor], - rpn_results_list: InstanceList, - batch_data_samples: SampleList, - sam, img_seg_feat, - rescale: bool = False) -> InstanceList: - """Perform forward propagation of the roi head and predict detection - results on the features of the upstream network. - - Args: - x (tuple[Tensor]): Features from upstream network. Each - has shape (N, C, H, W). - rpn_results_list (list[:obj:`InstanceData`]): list of region - proposals. - batch_data_samples (List[:obj:`DetDataSample`]): The Data - Samples. It usually includes information such as - `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. - rescale (bool): Whether to rescale the results to - the original image. Defaults to True. - - Returns: - list[obj:`InstanceData`]: Detection results of each image. - Each item usually contains following keys. - - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). - - bboxes (Tensor): Has a shape (num_instances, 4), - the last dimension 4 arrange as (x1, y1, x2, y2). - - masks (Tensor): Has a shape (num_instances, H, W). - """ - x = list(x) - bs, _, h, w = x[-1].shape - mask_pe = torch.zeros((bs, h, w), device=x[0].device, dtype=torch.bool) - img_feats_pe = self.generator_pe(mask_pe) - for i in range(len(x)): - x[i] = x[i] + torch.nn.functional.interpolate(img_feats_pe, size=x[i].shape[-2:], mode='bilinear') - - assert self.with_bbox, 'Bbox head must be implemented.' - batch_img_metas = [ - data_samples.metainfo for data_samples in batch_data_samples - ] - - # TODO: nms_op in mmcv need be enhanced, the bbox result may get - # difference when not rescale in bbox_head - - # If it has the mask branch, the bbox branch does not need - # to be scaled to the original image scale, because the mask - # branch will scale both bbox and mask at the same time. 
- bbox_rescale = rescale if not self.with_mask else False - results_list = self.predict_bbox( - x, - batch_img_metas, - rpn_results_list, - rcnn_test_cfg=self.test_cfg, - rescale=bbox_rescale) - - if self.with_mask: - results_list = self.predict_mask( - x, batch_img_metas, results_list, rescale=rescale, sam=sam, img_seg_feat=img_seg_feat) - - return results_list - - -@MODELS.register_module() -class SAMPromptMaskHead(FCNMaskHead): - - def __init__(self, - per_query_point: int = 5, - with_sincos: bool = True, - class_agnostic: bool = False, - loss_mask: ConfigType = dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0), - *args, - **kwargs - ) -> None: - super(BaseModule, self).__init__() - - self.per_query_point = per_query_point - self.with_sincos = with_sincos - self.class_agnostic = class_agnostic - - self.loss_mask = MODELS.build(loss_mask) - - if with_sincos: - sincos = 2 - else: - sincos = 1 - self.point_emb = nn.Sequential( - nn.Conv2d(256, 256, 3, stride=2, padding=1), - nn.BatchNorm2d(256), - nn.ReLU(inplace=True), - nn.Flatten(), - nn.Linear(7*7*256, 256), - nn.ReLU(inplace=True), - nn.Linear(256, 256), - nn.ReLU(inplace=True), - nn.Linear(256, 256*sincos*per_query_point) - ) - - def forward(self, x, sam, img_seg_feat, img_flag_ids) -> Tensor: - batch_size = x.shape[0] - point_emb = self.point_emb(x) - point_emb = point_emb.view(batch_size, self.per_query_point, -1) - if self.with_sincos: - point_emb = torch.sin(point_emb[..., ::2]) + point_emb[..., 1::2] - - nomask_dense_embeddings = sam.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( - point_emb.shape[0], -1, *img_seg_feat.shape[-2:] - ) - img_flag_ids = torch.bincount(img_flag_ids.long()) - padding = torch.zeros((len(img_seg_feat)-len(img_flag_ids),), device=img_flag_ids.device, dtype=img_flag_ids.dtype) - img_flag_ids = torch.cat([img_flag_ids, padding]) - img_embeddings = torch.repeat_interleave(img_seg_feat, img_flag_ids, dim=0) - img_pe = sam.prompt_encoder.get_dense_pe() - img_pe = repeat(img_pe, 'b c h w -> (b n) c h w', n=img_embeddings.shape[0]) - - res_img_feat = None - low_res_masks, iou_predictions = sam.mask_decoder.forward_batch( - image_embeddings=img_embeddings, - image_pe=img_pe, - sparse_prompt_embeddings=point_emb, - dense_prompt_embeddings=nomask_dense_embeddings, - multimask_output=False, - res_img_feat=res_img_feat, - ) - mask_pred = low_res_masks.squeeze(1) - iou_predictions = iou_predictions.squeeze(1) - return mask_pred, iou_predictions - - def get_targets(self, sampling_results: List[SamplingResult], - batch_gt_instances: InstanceList, - rcnn_train_cfg: ConfigDict) -> Tensor: - """Calculate the ground truth for all samples in a batch according to - the sampling_results. - - Args: - sampling_results (List[obj:SamplingResult]): Assign results of - all images in a batch after sampling. - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes``, ``labels``, and - ``masks`` attributes. - rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. - - Returns: - Tensor: Mask target of each positive proposals in the image. 
- """ - pos_proposals = [res.pos_priors for res in sampling_results] - pos_assigned_gt_inds = [ - res.pos_assigned_gt_inds for res in sampling_results - ] - gt_masks = [res.masks for res in batch_gt_instances] - - mask_targets_list = [] - mask_size = (rcnn_train_cfg.mask_size,) * 2 - device = pos_proposals[0].device - for pos_gt_inds, gt_mask in zip(pos_assigned_gt_inds, gt_masks): - if len(pos_gt_inds) == 0: - mask_targets = torch.zeros((0,) + mask_size, device=device, dytpe=torch.float32) - else: - mask_targets = gt_mask[pos_gt_inds.cpu()].to_tensor(dtype=torch.float32, device=device) - mask_targets_list.append(mask_targets) - mask_targets = torch.cat(mask_targets_list) - return mask_targets - - def loss_and_target(self, mask_preds: Tensor, - sampling_results: List[SamplingResult], - batch_gt_instances: InstanceList, - rcnn_train_cfg: ConfigDict) -> dict: - """Calculate the loss based on the features extracted by the mask head. - - Args: - mask_preds (Tensor): Predicted foreground masks, has shape - (num_pos, num_classes, h, w). - sampling_results (List[obj:SamplingResult]): Assign results of - all images in a batch after sampling. - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes``, ``labels``, and - ``masks`` attributes. - rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. - - Returns: - dict: A dictionary of loss and targets components. - """ - mask_targets = self.get_targets( - sampling_results=sampling_results, - batch_gt_instances=batch_gt_instances, - rcnn_train_cfg=rcnn_train_cfg) - - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - - mask_preds = torch.nn.functional.interpolate( - mask_preds.unsqueeze(1), size=mask_targets.shape[-2:], mode='bilinear', align_corners=False) - loss = dict() - if mask_preds.size(0) == 0: - loss_mask = mask_preds.sum() - else: - if self.class_agnostic: - loss_mask = self.loss_mask(mask_preds, mask_targets, - torch.zeros_like(pos_labels)) - else: - loss_mask = self.loss_mask(mask_preds, mask_targets, - pos_labels) - loss['loss_mask'] = loss_mask - # TODO: which algorithm requires mask_targets? - return dict(loss_mask=loss, mask_targets=mask_targets) - - def _predict_by_feat_single(self, - mask_preds: Tensor, - bboxes: Tensor, - labels: Tensor, - img_meta: dict, - rcnn_test_cfg: ConfigDict, - rescale: bool = False, - activate_map: bool = False) -> Tensor: - """Get segmentation masks from mask_preds and bboxes. - - Args: - mask_preds (Tensor): Predicted foreground masks, has shape - (n, num_classes, h, w). - bboxes (Tensor): Predicted bboxes, has shape (n, 4) - labels (Tensor): Labels of bboxes, has shape (n, ) - img_meta (dict): image information. - rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. - Defaults to None. - rescale (bool): If True, return boxes in original image space. - Defaults to False. - activate_map (book): Whether get results with augmentations test. - If True, the `mask_preds` will not process with sigmoid. - Defaults to False. - - Returns: - Tensor: Encoded masks, has shape (n, img_w, img_h) - - Example: - >>> from mmengine.config import Config - >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA - >>> N = 7 # N = number of extracted ROIs - >>> C, H, W = 11, 32, 32 - >>> # Create example instance of FCN Mask Head. 
- >>> self = FCNMaskHead(num_classes=C, num_convs=0) - >>> inputs = torch.rand(N, self.in_channels, H, W) - >>> mask_preds = self.forward(inputs) - >>> # Each input is associated with some bounding box - >>> bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N) - >>> labels = torch.randint(0, C, size=(N,)) - >>> rcnn_test_cfg = Config({'mask_thr_binary': 0, }) - >>> ori_shape = (H * 4, W * 4) - >>> scale_factor = (1, 1) - >>> rescale = False - >>> img_meta = {'scale_factor': scale_factor, - ... 'ori_shape': ori_shape} - >>> # Encoded masks are a list for each category. - >>> encoded_masks = self._get_seg_masks_single( - ... mask_preds, bboxes, labels, - ... img_meta, rcnn_test_cfg, rescale) - >>> assert encoded_masks.size()[0] == N - >>> assert encoded_masks.size()[1:] == ori_shape - """ - scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( - (1, 2)) - img_h, img_w = img_meta['ori_shape'][:2] - device = bboxes.device - - if not activate_map: - mask_preds = mask_preds.sigmoid() - else: - # In AugTest, has been activated before - mask_preds = bboxes.new_tensor(mask_preds) - - if rescale: # in-placed rescale the bboxes - bboxes /= scale_factor - else: - w_scale, h_scale = scale_factor[0, 0], scale_factor[0, 1] - img_h = np.round(img_h * h_scale.item()).astype(np.int32) - img_w = np.round(img_w * w_scale.item()).astype(np.int32) - - threshold = rcnn_test_cfg.mask_thr_binary - - im_mask = torch.nn.functional.interpolate( - mask_preds.unsqueeze(1), size=(img_h, img_w), mode='bilinear', align_corners=False).squeeze(1) - - if threshold >= 0: - im_mask = im_mask >= threshold - else: - # for visualization and debugging - im_mask = (im_mask * 255).to(dtype=torch.uint8) - return im_mask \ No newline at end of file diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/datasets/cub.py b/spaces/KyanChen/RSPrompter/mmpretrain/datasets/cub.py deleted file mode 100644 index 8db126216fb3408e2dd18255db04a851eb5fe08f..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpretrain/datasets/cub.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List - -from mmengine import get_file_backend, list_from_file -from mmengine.logging import MMLogger - -from mmpretrain.registry import DATASETS -from .base_dataset import BaseDataset -from .categories import CUB_CATEGORIES - - -@DATASETS.register_module() -class CUB(BaseDataset): - """The CUB-200-2011 Dataset. - - Support the `CUB-200-2011 `_ Dataset. - Comparing with the `CUB-200 `_ Dataset, - there are much more pictures in `CUB-200-2011`. After downloading and decompression, the dataset - directory structure is as follows. - - CUB dataset directory: :: - - CUB_200_2011 - ├── images - │ ├── class_x - │ │ ├── xx1.jpg - │ │ ├── xx2.jpg - │ │ └── ... - │ ├── class_y - │ │ ├── yy1.jpg - │ │ ├── yy2.jpg - │ │ └── ... - │ └── ... - ├── images.txt - ├── image_class_labels.txt - ├── train_test_split.txt - └── .... - - Args: - data_root (str): The root directory for CUB-200-2011 dataset. - split (str, optional): The dataset split, supports "train" and "test". - Default to "train". 
- - Examples: - >>> from mmpretrain.datasets import CUB - >>> train_dataset = CUB(data_root='data/CUB_200_2011', split='train') - >>> train_dataset - Dataset CUB - Number of samples: 5994 - Number of categories: 200 - Root of dataset: data/CUB_200_2011 - >>> test_dataset = CUB(data_root='data/CUB_200_2011', split='test') - >>> test_dataset - Dataset CUB - Number of samples: 5794 - Number of categories: 200 - Root of dataset: data/CUB_200_2011 - """ # noqa: E501 - - METAINFO = {'classes': CUB_CATEGORIES} - - def __init__(self, - data_root: str, - split: str = 'train', - test_mode: bool = False, - **kwargs): - - splits = ['train', 'test'] - assert split in splits, \ - f"The split must be one of {splits}, but get '{split}'" - self.split = split - - # To handle the BC-breaking - if split == 'train' and test_mode: - logger = MMLogger.get_current_instance() - logger.warning('split="train" but test_mode=True. ' - 'The training set will be used.') - - ann_file = 'images.txt' - data_prefix = 'images' - image_class_labels_file = 'image_class_labels.txt' - train_test_split_file = 'train_test_split.txt' - - self.backend = get_file_backend(data_root, enable_singleton=True) - self.image_class_labels_file = self.backend.join_path( - data_root, image_class_labels_file) - self.train_test_split_file = self.backend.join_path( - data_root, train_test_split_file) - super(CUB, self).__init__( - ann_file=ann_file, - data_root=data_root, - data_prefix=data_prefix, - test_mode=test_mode, - **kwargs) - - def _load_data_from_txt(self, filepath): - """load data from CUB txt file, the every line of the file is idx and a - data item.""" - pairs = list_from_file(filepath) - data_dict = dict() - for pair in pairs: - idx, data_item = pair.split() - # all the index starts from 1 in CUB files, - # here we need to '- 1' to let them start from 0. 
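`_load_data_from_txt` parses the CUB index files, whose ids are 1-based; the `- 1` in the loop shifts them to 0-based keys. A self-contained miniature with made-up file contents (a sketch, not part of the original file):

```python
# Hypothetical miniature of a CUB index file: each line is "<1-based id> <item>".
lines = ["1 001.Black_footed_Albatross/img1.jpg",
         "2 001.Black_footed_Albatross/img2.jpg"]

data_dict = {}
for pair in lines:
    idx, data_item = pair.split()
    data_dict[int(idx) - 1] = data_item  # shift ids to start from 0

assert data_dict[0].endswith("img1.jpg") and 2 not in data_dict
```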
- data_dict[int(idx) - 1] = data_item - return data_dict - - def load_data_list(self): - """Load images and ground truth labels.""" - sample_dict = self._load_data_from_txt(self.ann_file) - - label_dict = self._load_data_from_txt(self.image_class_labels_file) - - split_dict = self._load_data_from_txt(self.train_test_split_file) - - assert sample_dict.keys() == label_dict.keys() == split_dict.keys(),\ - f'sample_ids should be same in files {self.ann_file}, ' \ - f'{self.image_class_labels_file} and {self.train_test_split_file}' - - data_list = [] - for sample_id in sample_dict.keys(): - if split_dict[sample_id] == '1' and self.split == 'test': - # skip train samples when split='test' - continue - elif split_dict[sample_id] == '0' and self.split == 'train': - # skip test samples when split='train' - continue - - img_path = self.backend.join_path(self.img_prefix, - sample_dict[sample_id]) - gt_label = int(label_dict[sample_id]) - 1 - info = dict(img_path=img_path, gt_label=gt_label) - data_list.append(info) - - return data_list - - def extra_repr(self) -> List[str]: - """The extra repr information of the dataset.""" - body = [ - f'Root of dataset: \t{self.data_root}', - ] - return body diff --git a/spaces/Kyo-Kai/Fsg-pp/README.md b/spaces/Kyo-Kai/Fsg-pp/README.md deleted file mode 100644 index fe60521aaef208d0a88959f26ad6c6aaf6e3036c..0000000000000000000000000000000000000000 --- a/spaces/Kyo-Kai/Fsg-pp/README.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Finally Some Good Profile Pictures -emoji: 😉 -colorFrom: purple -colorTo: blue -sdk: docker -pinned: false -license: gpl-3.0 ---- \ No newline at end of file diff --git a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/modules/__init__.py b/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/modules/__init__.py deleted file mode 100644 index 6fdbf03359958f3d67ab00f879bf6b61a6c8f06a..0000000000000000000000000000000000000000 --- a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/modules/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# ------------------------------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------------------ -# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -# ------------------------------------------------------------------------------------------------ - -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR - -from .ms_deform_attn import MSDeformAttn diff --git a/spaces/Lamai/LAMAIGPT/tests/unit/test_browse_scrape_text.py b/spaces/Lamai/LAMAIGPT/tests/unit/test_browse_scrape_text.py deleted file mode 100644 index fea5ebfc05d466c7cb5711b5ac10e2ea102ddc45..0000000000000000000000000000000000000000 --- a/spaces/Lamai/LAMAIGPT/tests/unit/test_browse_scrape_text.py +++ /dev/null @@ -1,98 +0,0 @@ -# Generated by CodiumAI - -import requests - -from autogpt.commands.web_requests import scrape_text - -""" -Code Analysis - -Objective: -The objective of the "scrape_text" function is to scrape the text content from -a given URL and return it as a string, after removing any unwanted HTML tags and scripts. - -Inputs: -- url: a string representing the URL of the webpage to be scraped. - -Flow: -1. 
Send a GET request to the given URL using the requests library and the user agent header from the config file. -2. Check if the response contains an HTTP error. If it does, return an error message. -3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags. -4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup. -5. Split the text into lines and then into chunks, removing any extra whitespace. -6. Join the chunks into a single string with newline characters between them. -7. Return the cleaned text. - -Outputs: -- A string representing the cleaned text content of the webpage. - -Additional aspects: -- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively. -- The function removes script and style tags from the HTML to avoid including unwanted content in the text output. -- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text. -""" - - -class TestScrapeText: - # Tests that scrape_text() returns the expected text when given a valid URL. - def test_scrape_text_with_valid_url(self, mocker): - # Mock the requests.get() method to return a response with expected text - expected_text = "This is some sample text" - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = f"

<html><body><div><p>{expected_text}</p></div></body></html>

" - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL and assert that it returns the expected text - url = "http://www.example.com" - assert scrape_text(url) == expected_text - - # Tests that the function returns an error message when an invalid or unreachable url is provided. - def test_invalid_url(self, mocker): - # Mock the requests.get() method to raise an exception - mocker.patch( - "requests.Session.get", side_effect=requests.exceptions.RequestException - ) - - # Call the function with an invalid URL and assert that it returns an error message - url = "http://www.invalidurl.com" - error_message = scrape_text(url) - assert "Error:" in error_message - - # Tests that the function returns an empty string when the html page contains no text to be scraped. - def test_no_text(self, mocker): - # Mock the requests.get() method to return a response with no text - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = "" - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL and assert that it returns an empty string - url = "http://www.example.com" - assert scrape_text(url) == "" - - # Tests that the function returns an error message when the response status code is an http error (>=400). - def test_http_error(self, mocker): - # Mock the requests.get() method to return a response with a 404 status code - mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404)) - - # Call the function with a URL - result = scrape_text("https://www.example.com") - - # Check that the function returns an error message - assert result == "Error: HTTP 404 error" - - # Tests that scrape_text() properly handles HTML tags. - def test_scrape_text_with_html_tags(self, mocker): - # Create a mock response object with HTML containing tags - html = "

<html><body><p>This is <b>bold</b> text.</p></body></html>

" - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = html - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a URL - result = scrape_text("https://www.example.com") - - # Check that the function properly handles HTML tags - assert result == "This is bold text." diff --git a/spaces/Layer6/TR0N/README.md b/spaces/Layer6/TR0N/README.md deleted file mode 100644 index 4cef08da4b9280b4559d9a23fcbb01c5bc0cd42c..0000000000000000000000000000000000000000 --- a/spaces/Layer6/TR0N/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: TR0N -emoji: 🐨 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Lihuchen/AcroBERT/app.py b/spaces/Lihuchen/AcroBERT/app.py deleted file mode 100644 index 63c91ae8d81a41f1bdaa7da5e2f09056e3da59cd..0000000000000000000000000000000000000000 --- a/spaces/Lihuchen/AcroBERT/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import gradio as gr -from acrobert import acronym_linker - - -def greet(sentence): - results = acronym_linker(sentence, mode='acrobert') - return results - - -sample_list = [ - "The LDA is an example of a topic model.", - "Using a camera sensor, LDA judges the position of your vehicle in relation to the road markings below. This keeps the driver consistently aware of their situation, so that corrective action can be taken when needed.", - "AI is a wide-ranging branch of computer science concerned with building smart machines capable of performing tasks that typically require human intelligence. ", - "In the United States, the AI for potassium for adults is 4.7 grams", - "This new genome assembly and the annotation are tagged as a RefSeq genome by NCBI and thus provide substantially enhanced genomic resources for future research involving S. scovelli.", - "In this study, we found that miR-34a demonstrated greater expression in the lungs of patients with IPF and in mice with experimental pulmonary fibrosis , with its primary localization in lung fibroblasts.", -] - -iface = gr.Interface(fn=greet, inputs="text", outputs="text", examples=sample_list, cache_examples=False) -iface.launch() \ No newline at end of file diff --git a/spaces/Lngo/paragon-AI-blip2-image-to-text/README.md b/spaces/Lngo/paragon-AI-blip2-image-to-text/README.md deleted file mode 100644 index a6912878908e3eeac160f812ecb00fe47e8af6fb..0000000000000000000000000000000000000000 --- a/spaces/Lngo/paragon-AI-blip2-image-to-text/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Paragon AI Blip2 Image To Text -emoji: 🌖 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/LuxOAI/ChatGpt-Web/docs/faq-cn.md b/spaces/LuxOAI/ChatGpt-Web/docs/faq-cn.md deleted file mode 100644 index 6251889ca774e4df369303727c02a8769b1da4dd..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/ChatGpt-Web/docs/faq-cn.md +++ /dev/null @@ -1,165 +0,0 @@ -# 常见问题 - -## 如何快速获得帮助? -1. 询问ChatGPT / Bing / 百度 / Google等。 -2. 
Ask other users in the community. Provide background information and a detailed description of the problem you ran into; high-quality questions are much more likely to get useful answers.
-
-# Deployment questions
-
-For detailed tutorials on the various deployment options, see: https://rptzik3toh.feishu.cn/docx/XtrdduHwXoSCGIxeFLlcEPsdn8b
-
-## Why does the Docker deployment keep telling me to update?
-The Docker image is effectively the stable channel: the latest Docker tag always matches the latest release version. We currently cut a release every one or two days, so the Docker version will always lag the newest commits by one or two days. This is expected.
-
-## How do I deploy on Vercel?
-1. Register a GitHub account and fork this project.
-2. Register on Vercel (phone verification required; Chinese numbers work) and connect your GitHub account.
-3. Create a new project on Vercel, select the project you forked on GitHub, fill in the environment variables as needed, and start the deployment. Once deployed, you can reach your project through the domain Vercel provides, as long as you have a working proxy.
-4. If you need unblocked access from mainland China: on your domain management site, add a CNAME record for your domain pointing to cname.vercel-dns.com, then configure that domain for your project on Vercel.
-
-## How do I change Vercel environment variables?
-- Open the Vercel dashboard;
-- select your chatgpt next web project;
-- click the Settings tab at the top of the page;
-- find the Environment Variables entry in the sidebar;
-- edit the corresponding values.
-
-## What is the CODE environment variable? Do I have to set it?
-It is a custom access password. You can either:
-1. Not set it, by deleting the environment variable. Careful: anyone will then be able to use your deployment.
-2. Set the environment variable CODE when deploying (multiple passwords are supported, separated by commas). Once an access password is set, users must enter it on the settings page before they can use the app. See the [related notes](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E9%85%8D%E7%BD%AE%E9%A1%B5%E9%9D%A2%E8%AE%BF%E9%97%AE%E5%AF%86%E7%A0%81).
-
-## Why doesn't my deployed version stream responses?
-> Related discussion: [#386](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/386)
-
-If you use nginx as a reverse proxy, add the following to your configuration file:
-```
-# no caching, to support streaming output
-proxy_cache off;  # turn off caching
-proxy_buffering off;  # turn off proxy buffering
-chunked_transfer_encoding on;  # enable chunked transfer encoding
-tcp_nopush on;  # enable the TCP NOPUSH option, disabling Nagle's algorithm
-tcp_nodelay on;  # enable the TCP NODELAY option, disabling delayed ACKs
-keepalive_timeout 300;  # set the keep-alive timeout (300 seconds here)
-```
-
-If you deployed on netlify, this issue is still awaiting a fix; please be patient.
-
-## I finished deploying, but I can't access it
-Check for and rule out the following problems:
-- Is the service actually running?
-- Is the port mapped correctly?
-- Is the port open in the firewall?
-- Is the route to the server reachable?
-- Does the domain resolve correctly?
-
-## What is a proxy, and how do I use one?
-Because of OpenAI's IP restrictions, China and some other countries/regions cannot connect to the OpenAI API directly and must go through a proxy. You can use a proxy server (a forward proxy) or an already configured OpenAI API reverse proxy.
-- Forward proxy example: a VPN/tunnel. For Docker deployments, set the HTTP_PROXY environment variable to your proxy address (for example: 10.10.10.10:8002).
-- Reverse proxy example: use a proxy address someone else has set up, or set one up for free through Cloudflare. Set the project's BASE_URL environment variable to your proxy address.
-
-## Can I deploy on a server inside mainland China?
-Yes, but there are problems to solve:
-- you need a proxy to reach sites such as GitHub and OpenAI;
-- domain resolution for a domestic server requires an ICP filing;
-- domestic policy restricts proxy access to foreign sites and ChatGPT-related applications, so the deployment may get blocked.
-
-# Usage questions
-
-## Why do I keep getting "出错了,稍后重试吧" (an error occurred, try again later)?
-There can be many causes; check the following in order:
-- first check whether your code is the latest version; update to the latest release and retry;
-- check that the api key is set correctly; the environment variable name must be all caps with underscores;
-- check that the api key is still valid;
-- if the steps above do not pin down the problem, open a new issue in the issue tracker and attach Vercel's runtime log or the Docker runtime log.
-
-## Why are ChatGPT's replies garbled?
-In the settings screen, under the model settings, there is an option called `temperature`. If this value is greater than 1, replies may come out garbled; dial it back to 1 or below.
-
-## It says "现在是未授权状态,请在设置页输入访问密码" (unauthorized; enter the access password on the settings page)?
-The project has an access password set via the CODE environment variable. On first use, you must go into settings and enter the access code before you can use the app.
-
-## It says "You exceeded your current quota, ..."
-There is a problem with the API key: the balance is insufficient.
-
-# Network service questions
-
-## What is Cloudflare?
-Cloudflare (CF) is a network service provider offering CDN, domain management, static page hosting, edge function deployment and more. Common uses: buying and/or hosting your domain (resolution, dynamic DNS, etc.), putting a CDN in front of your server (which can hide its IP so it doesn't get blocked), and deploying websites (CF Pages). CF offers most of these services for free.
-
-## What is Vercel?
-Vercel is a global cloud platform designed to help developers build and ship modern web applications faster. This project, like many web apps, can be deployed on Vercel for free with one click. No code knowledge, no Linux, no server, no payment, and no OpenAI API proxy setup required. The downside is that you must bind a domain to get unblocked access from mainland China.
-
-## How do I get a domain?
-1. Register with a domain registrar yourself: abroad there are Namesilo (accepts Alipay), Cloudflare and so on; in China there is 万网 (Wanwang) among others.
-2. Free domain providers: eu.org (second-level domains) and the like.
-3. Ask a friend for a free second-level domain.
-
-## How do I get a server?
-- Examples of foreign server providers: Amazon Web Services, Google Cloud, Vultr, Bandwagon, Hostdare, and so on.
-Notes on foreign servers: the network route affects access speed from China; CN2 GIA and CN2 lines are recommended. If the server is hard to reach from China (heavy packet loss, etc.), you can try fronting it with a CDN (Cloudflare or another provider).
-- Domestic server providers: Alibaba Cloud, Tencent, etc.
-Notes on domestic servers: domain resolution requires an ICP filing; domestic bandwidth is relatively expensive; and reaching foreign sites (GitHub, OpenAI, etc.) requires a proxy.
-
-## When does a server need an ICP filing?
-Websites operating in mainland China are required by the regulator to be filed. In practice, when the server is located in China and a domain resolves to it, the server provider will enforce the filing requirement, otherwise the service gets shut down. The usual rules:
-|Server location|Domain registrar|Filing required|
-|---|---|---|
-|China|China|Yes|
-|China|Abroad|Yes|
-|Abroad|Abroad|No|
-|Abroad|China|Usually no|
-
-After switching server providers, the filing must be transferred.
-
-# OpenAI questions
-
-## How do I register an OpenAI account?
-Register at chat.openai.com. You will need:
-- a good proxy (a native IP address in a region OpenAI supports)
-- a supported email address (for example Gmail or a company/school address; not Outlook or qq)
-- a way to receive SMS verification (for example the SMS-activate site)
-
-## How do I enable the OpenAI API? How do I check my API balance?
-Official page (proxy required): https://platform.openai.com/account/usage
-Some community members have set up balance-check proxies that need no VPN; ask around to get one, but verify that the source is trustworthy so your API key doesn't leak.
-
-## Why doesn't my newly registered OpenAI account have any API balance?
-(Updated April 6) Newly registered accounts usually show their API balance after 24 hours. New accounts are currently granted a $5 balance.
-
-## How do I top up my OpenAI API account?
-OpenAI only accepts credit cards from certain regions (Chinese credit cards cannot be used). A few possible routes:
-1. a Depay virtual credit card
-2. applying for a foreign credit card
-3. finding someone online to top up on your behalf
-
-## How do I get API access to GPT-4?
-- API access to GPT-4 requires a separate application. Fill in your information at the following address to join the waitlist (have your OpenAI organization ID ready): https://openai.com/waitlist/gpt-4-api
-Then wait for an email.
-- Having ChatGPT Plus does not grant GPT-4 access; the two are unrelated.
-
-## How do I use the Azure OpenAI endpoint?
-Please refer to: [#371](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/371)
-
-## Why are my tokens being consumed so fast?
-> Related discussion: [#518](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518)
-- If you have GPT-4 access and routinely use the GPT-4 api, your bill will balloon quickly, since GPT-4 costs roughly 15 times as much as GPT-3.5;
-- If you are on GPT-3.5 and don't use it heavily, yet still see your bill climbing fast, troubleshoot immediately as follows:
-  - Check your api key's usage record on the openai website. If tokens are being consumed every hour, tens of thousands at a time, your key has certainly leaked; delete it and regenerate it at once. **Do not check your balance on random websites.**
-  - If your password is very short, say 5 letters or fewer, brute-forcing it is very cheap. Search your docker logs to confirm whether someone has been trying password combinations en masse; keyword: got access code
-- These two checks will locate why your tokens are being drained:
-  - if the openai usage record is abnormal but the docker log is clean, the api key has leaked;
-  - if the docker log shows a flood of got access code attempts, the password has been brute-forced.
-
-## How is the API billed?
-Pricing page on the OpenAI website: https://openai.com/pricing#language-models
-OpenAI bills by token count: 1000 tokens typically correspond to about 750 English words or 500 Chinese characters. Input (Prompt) and output (Completion) are billed separately.
-|Model|User input (Prompt) price|Model output (Completion) price|Max tokens per interaction|
-|----|----|----|----|
-|gpt-3.5|$0.002 / 1k tokens|$0.002 / 1k tokens|4096|
-|gpt-4|$0.03 / 1k tokens|$0.06 / 1k tokens|8192|
-|gpt-4-32K|$0.06 / 1k tokens|$0.12 / 1k tokens|32768|
-
-## What's the difference between the gpt-3.5-turbo and gpt-3.5-turbo-0301 (or gpt-3.5-turbo-mmdd) models?
-Official documentation: https://platform.openai.com/docs/models/gpt-3-5
-- gpt-3.5-turbo is the latest model and keeps being updated.
-- gpt-3.5-turbo-0301 is a model snapshot frozen on March 1; it does not change and is expected to be replaced by a new snapshot after 3 months.
diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/__init__.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Marne/MockingBird/mockingbirdforuse/synthesizer/utils/__init__.py b/spaces/Marne/MockingBird/mockingbirdforuse/synthesizer/utils/__init__.py
deleted file mode 100644
index 31a2e41ef0a6c691db888458dbaa1381b0081cdd..0000000000000000000000000000000000000000
--- a/spaces/Marne/MockingBird/mockingbirdforuse/synthesizer/utils/__init__.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import torch
-
-
-_output_ref = None
-_replicas_ref = None
-
-
-def data_parallel_workaround(model, *input):
-    global _output_ref
-    global _replicas_ref
-    device_ids = list(range(torch.cuda.device_count()))
-    output_device = device_ids[0]
-    replicas = torch.nn.parallel.replicate(model, device_ids)
-    # input.shape = (num_args, batch, ...)
-    inputs = torch.nn.parallel.scatter(input, device_ids)
-    # inputs.shape = (num_gpus, num_args, batch/num_gpus, ...)
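-    # scatter may return fewer chunks than GPUs when the batch is small, so drop the unused replicas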
- replicas = replicas[: len(inputs)] - outputs = torch.nn.parallel.parallel_apply(replicas, inputs) - y_hat = torch.nn.parallel.gather(outputs, output_device) - _output_ref = outputs - _replicas_ref = replicas - return y_hat - - -class ValueWindow: - def __init__(self, window_size=100): - self._window_size = window_size - self._values = [] - - def append(self, x): - self._values = self._values[-(self._window_size - 1) :] + [x] - - @property - def sum(self): - return sum(self._values) - - @property - def count(self): - return len(self._values) - - @property - def average(self): - return self.sum / max(1, self.count) - - def reset(self): - self._values = [] diff --git a/spaces/Mashir0/pximg/README.md b/spaces/Mashir0/pximg/README.md deleted file mode 100644 index 38a3ba10bf8418bf4953c2011df376681de3bee8..0000000000000000000000000000000000000000 --- a/spaces/Mashir0/pximg/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Pximg -emoji: 🖼️ -colorFrom: blue -colorTo: pink -sdk: docker -pinned: false ---- diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/backbones/mobilenet_v2.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/backbones/mobilenet_v2.py deleted file mode 100644 index ab6b3791692a0d1b5da3601875711710b7bd01ba..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/backbones/mobilenet_v2.py +++ /dev/null @@ -1,180 +0,0 @@ -import logging - -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule, constant_init, kaiming_init -from annotator.uniformer.mmcv.runner import load_checkpoint -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES -from ..utils import InvertedResidual, make_divisible - - -@BACKBONES.register_module() -class MobileNetV2(nn.Module): - """MobileNetV2 backbone. - - Args: - widen_factor (float): Width multiplier, multiply number of - channels in each layer by this amount. Default: 1.0. - strides (Sequence[int], optional): Strides of the first block of each - layer. If not specified, default config in ``arch_setting`` will - be used. - dilations (Sequence[int]): Dilation of each layer. - out_indices (None or Sequence[int]): Output from which stages. - Default: (7, ). - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - """ - - # Parameters to build layers. 3 parameters are needed to construct a - # layer, from left to right: expand_ratio, channel, num_blocks. 
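-    # e.g. [6, 24, 2]: expansion ratio 6 inside each block, 24 output channels (before widen_factor), 2 blocks in the stage.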
- arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4], - [6, 96, 3], [6, 160, 3], [6, 320, 1]] - - def __init__(self, - widen_factor=1., - strides=(1, 2, 2, 2, 1, 2, 1), - dilations=(1, 1, 1, 1, 1, 1, 1), - out_indices=(1, 2, 4, 6), - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - norm_eval=False, - with_cp=False): - super(MobileNetV2, self).__init__() - self.widen_factor = widen_factor - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == len(self.arch_settings) - self.out_indices = out_indices - for index in out_indices: - if index not in range(0, 7): - raise ValueError('the item in out_indices must in ' - f'range(0, 8). But received {index}') - - if frozen_stages not in range(-1, 7): - raise ValueError('frozen_stages must be in range(-1, 7). ' - f'But received {frozen_stages}') - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - - self.in_channels = make_divisible(32 * widen_factor, 8) - - self.conv1 = ConvModule( - in_channels=3, - out_channels=self.in_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.layers = [] - - for i, layer_cfg in enumerate(self.arch_settings): - expand_ratio, channel, num_blocks = layer_cfg - stride = self.strides[i] - dilation = self.dilations[i] - out_channels = make_divisible(channel * widen_factor, 8) - inverted_res_layer = self.make_layer( - out_channels=out_channels, - num_blocks=num_blocks, - stride=stride, - dilation=dilation, - expand_ratio=expand_ratio) - layer_name = f'layer{i + 1}' - self.add_module(layer_name, inverted_res_layer) - self.layers.append(layer_name) - - def make_layer(self, out_channels, num_blocks, stride, dilation, - expand_ratio): - """Stack InvertedResidual blocks to build a layer for MobileNetV2. - - Args: - out_channels (int): out_channels of block. - num_blocks (int): Number of blocks. - stride (int): Stride of the first block. - dilation (int): Dilation of the first block. - expand_ratio (int): Expand the number of channels of the - hidden layer in InvertedResidual by this ratio. 
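-
-        Returns:
-            nn.Sequential: The stacked InvertedResidual blocks.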
- """ - layers = [] - for i in range(num_blocks): - layers.append( - InvertedResidual( - self.in_channels, - out_channels, - stride if i == 0 else 1, - expand_ratio=expand_ratio, - dilation=dilation if i == 0 else 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - with_cp=self.with_cp)) - self.in_channels = out_channels - - return nn.Sequential(*layers) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - x = self.conv1(x) - - outs = [] - for i, layer_name in enumerate(self.layers): - layer = getattr(self, layer_name) - x = layer(x) - if i in self.out_indices: - outs.append(x) - - if len(outs) == 1: - return outs[0] - else: - return tuple(outs) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for i in range(1, self.frozen_stages + 1): - layer = getattr(self, f'layer{i}') - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super(MobileNetV2, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/textsnake/_base_textsnake_resnet50_fpn-unet.py b/spaces/Mountchicken/MAERec-Gradio/configs/textdet/textsnake/_base_textsnake_resnet50_fpn-unet.py deleted file mode 100644 index f1586d61f9886bcb08fe43c95764f944dfd3e099..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/textsnake/_base_textsnake_resnet50_fpn-unet.py +++ /dev/null @@ -1,82 +0,0 @@ -model = dict( - type='TextSnake', - backbone=dict( - type='mmdet.ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='BN', requires_grad=True), - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - norm_eval=True, - style='caffe'), - neck=dict( - type='FPN_UNet', in_channels=[256, 512, 1024, 2048], out_channels=32), - det_head=dict( - type='TextSnakeHead', - in_channels=32, - module_loss=dict(type='TextSnakeModuleLoss'), - postprocessor=dict( - type='TextSnakePostprocessor', text_repr_type='poly')), - data_preprocessor=dict( - type='TextDetDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True, - pad_size_divisor=32)) - -train_pipeline = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadOCRAnnotations', - with_bbox=True, - with_polygon=True, - with_label=True), - dict( - type='TorchVisionWrapper', - op='ColorJitter', - brightness=32.0 / 255, - saturation=0.5), - dict( - type='RandomApply', - transforms=[dict(type='RandomCrop', min_side_ratio=0.3)], - prob=0.65), - dict( - type='RandomRotate', - max_angle=20, - pad_with_fixed_color=False, - use_canvas=True), - dict( - type='BoundedScaleAspectJitter', - long_size_bound=800, - short_size_bound=480, - ratio_range=(0.7, 1.3), - aspect_ratio_range=(0.9, 1.1)), - dict( - type='RandomChoice', - transforms=[[ - dict(type='Resize', scale=800, keep_ratio=True), - dict(type='SourceImagePad', target_scale=800) - 
], - dict(type='Resize', scale=800, keep_ratio=False)], - prob=[0.4, 0.6]), - dict(type='RandomFlip', prob=0.5, direction='horizontal'), - dict( - type='PackTextDetInputs', - meta_keys=('img_path', 'ori_shape', 'img_shape')) -] - -test_pipeline = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict(type='Resize', scale=(1333, 736), keep_ratio=True), - # add loading annotation after ``Resize`` because ground truth - # does not need to do resize data transform - dict( - type='LoadOCRAnnotations', - with_polygon=True, - with_bbox=True, - with_label=True), - dict( - type='PackTextDetInputs', - meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor')) -] diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/datasets/totaltext.py b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/datasets/totaltext.py deleted file mode 100644 index 07743439b1dcb688b7bcf5c918609d4e018bc4b7..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/datasets/totaltext.py +++ /dev/null @@ -1,15 +0,0 @@ -totaltext_textrecog_data_root = 'data/totaltext/' - -totaltext_textrecog_train = dict( - type='OCRDataset', - data_root=totaltext_textrecog_data_root, - ann_file='textrecog_train.json', - test_mode=False, - pipeline=None) - -totaltext_textrecog_test = dict( - type='OCRDataset', - data_root=totaltext_textrecog_data_root, - ann_file='textrecog_test.json', - test_mode=True, - pipeline=None) diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textdet/postprocessors/__init__.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textdet/postprocessors/__init__.py deleted file mode 100644 index 783958e518b3707736aef40be7c7720ad447424c..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textdet/postprocessors/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base import BaseTextDetPostProcessor -from .db_postprocessor import DBPostprocessor -from .drrg_postprocessor import DRRGPostprocessor -from .fce_postprocessor import FCEPostprocessor -from .pan_postprocessor import PANPostprocessor -from .pse_postprocessor import PSEPostprocessor -from .textsnake_postprocessor import TextSnakePostprocessor - -__all__ = [ - 'PSEPostprocessor', 'PANPostprocessor', 'DBPostprocessor', - 'DRRGPostprocessor', 'FCEPostprocessor', 'TextSnakePostprocessor', - 'BaseTextDetPostProcessor' -] diff --git a/spaces/MuGeminorum/insecta/khandy/numpy_utils.py b/spaces/MuGeminorum/insecta/khandy/numpy_utils.py deleted file mode 100644 index c4a2a17584e99c00f70a86cf73de18b5a8e6216a..0000000000000000000000000000000000000000 --- a/spaces/MuGeminorum/insecta/khandy/numpy_utils.py +++ /dev/null @@ -1,173 +0,0 @@ -import numpy as np - - -def sigmoid(x): - return 1. / (1 + np.exp(-x)) - - -def softmax(x, axis=-1, copy=True): - """ - Args: - copy: Copy x or not. 
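-        axis: Axis along which softmax is computed (default: -1, the last axis).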
-
-    References:
-        `from sklearn.utils.extmath import softmax`
-    """
-    if copy:
-        x = np.copy(x)
-    max_val = np.max(x, axis=axis, keepdims=True)
-    x -= max_val
-    np.exp(x, x)
-    sum_exp = np.sum(x, axis=axis, keepdims=True)
-    x /= sum_exp
-    return x
-
-
-def log_sum_exp(x, axis=-1, keepdims=False):
-    """
-    References:
-        numpy.logaddexp
-        numpy.logaddexp2
-        scipy.misc.logsumexp
-    """
-    max_val = np.max(x, axis=axis, keepdims=True)
-    x -= max_val
-    np.exp(x, x)
-    sum_exp = np.sum(x, axis=axis, keepdims=keepdims)
-    lse = np.log(sum_exp, sum_exp)
-    if not keepdims:
-        max_val = np.squeeze(max_val, axis=axis)
-    return max_val + lse
-
-
-def l2_normalize(x, axis=None, epsilon=1e-12, copy=True):
-    """L2 normalize an array along an axis.
-
-    Args:
-        x : array_like of floats
-            Input data.
-        axis : None or int or tuple of ints, optional
-            Axis or axes along which to operate.
-        epsilon: float, optional
-            A small value to avoid division by zero.
-        copy : bool, optional
-            Copy x or not.
-    """
-    if copy:
-        x = np.copy(x)
-    x /= np.maximum(np.linalg.norm(x, axis=axis, keepdims=True), epsilon)
-    return x
-
-
-def minmax_normalize(x, axis=None, epsilon=1e-12, copy=True):
-    """Min-max normalize an array along a given axis.
-
-    Args:
-        x : array_like of floats
-            Input data.
-        axis : None or int or tuple of ints, optional
-            Axis or axes along which to operate.
-        epsilon: float, optional
-            A small value to avoid division by zero.
-        copy : bool, optional
-            Copy x or not.
-    """
-    if copy:
-        x = np.copy(x)
-
-    minval = np.min(x, axis=axis, keepdims=True)
-    maxval = np.max(x, axis=axis, keepdims=True)
-    maxval -= minval
-    maxval = np.maximum(maxval, epsilon)
-
-    x -= minval
-    x /= maxval
-    return x
-
-
-def zscore_normalize(x, mean=None, std=None, axis=None, epsilon=1e-12, copy=True):
-    """Z-score normalize an array along a given axis.
-
-    Args:
-        x : array_like of floats
-            Input data.
-        mean: array_like of floats, optional
-            Mean for the z-score.
-        std: array_like of floats, optional
-            Std for the z-score.
-        axis : None or int or tuple of ints, optional
-            Axis or axes along which to operate.
-        epsilon: float, optional
-            A small value to avoid division by zero.
-        copy : bool, optional
-            Copy x or not.
-    """
-    if copy:
-        x = np.copy(x)
-    if mean is None:
-        mean = np.mean(x, axis=axis, keepdims=True)
-    if std is None:
-        std = np.std(x, axis=axis, keepdims=True)
-    mean = np.asarray(mean, dtype=x.dtype)
-    std = np.asarray(std, dtype=x.dtype)
-    std = np.maximum(std, epsilon)
-
-    x -= mean
-    x /= std
-    return x
-
-
-def get_order_of_magnitude(number):
-    number = np.where(number == 0, 1, number)
-    oom = np.floor(np.log10(np.abs(number)))
-    return oom.astype(np.int32)
-
-
-def top_k(x, k, axis=-1, largest=True, sorted=True):
-    """Finds values and indices of the k largest/smallest
-    elements along a given axis.
-
-    Args:
-        x: numpy ndarray
-            1-D or higher, with size at least k along the given axis.
-        k: int
-            Number of top elements to look for along the given axis.
-        axis: int
-            The axis to sort along.
-        largest: bool
-            Controls whether to return the largest or smallest elements.
-        sorted: bool
-            If true, the resulting k elements will be sorted by value.
-
-    Returns:
-        topk_values:
-            The k largest/smallest elements along the given axis.
-        topk_indices:
-            The indices of the k largest/smallest elements along the given axis.
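-
-    Example:
-        >>> top_k(np.array([1, 3, 2]), 2)
-        (array([3, 2]), array([1, 2]))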
- """ - if axis is None: - axis_size = x.size - else: - axis_size = x.shape[axis] - assert 1 <= k <= axis_size - - x = np.asanyarray(x) - if largest: - index_array = np.argpartition(x, axis_size-k, axis=axis) - topk_indices = np.take(index_array, -np.arange(k)-1, axis=axis) - else: - index_array = np.argpartition(x, k-1, axis=axis) - topk_indices = np.take(index_array, np.arange(k), axis=axis) - topk_values = np.take_along_axis(x, topk_indices, axis=axis) - if sorted: - sorted_indices_in_topk = np.argsort(topk_values, axis=axis) - if largest: - sorted_indices_in_topk = np.flip(sorted_indices_in_topk, axis=axis) - sorted_topk_values = np.take_along_axis( - topk_values, sorted_indices_in_topk, axis=axis) - sorted_topk_indices = np.take_along_axis( - topk_indices, sorted_indices_in_topk, axis=axis) - return sorted_topk_values, sorted_topk_indices - return topk_values, topk_indices - - \ No newline at end of file diff --git a/spaces/NCTCMumbai/NCTC/models/research/audioset/README.md b/spaces/NCTCMumbai/NCTC/models/research/audioset/README.md deleted file mode 100644 index c5a39b28ec1778a558a74692817b40d5d906e18c..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/audioset/README.md +++ /dev/null @@ -1,55 +0,0 @@ -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Models for AudioSet: A Large Scale Dataset of Audio Events - -This repository provides models and supporting code associated with -[AudioSet](http://g.co/audioset), a dataset of over 2 million human-labeled -10-second YouTube video soundtracks, with labels taken from an ontology of -more than 600 audio event classes. - -AudioSet was -[released](https://research.googleblog.com/2017/03/announcing-audioset-dataset-for-audio.html) -in March 2017 by Google's Sound Understanding team to provide a common -large-scale evaluation task for audio event detection as well as a starting -point for a comprehensive vocabulary of sound events. - -For more details about AudioSet and the various models we have trained, please -visit the [AudioSet website](http://g.co/audioset) and read our papers: - -* Gemmeke, J. et. al., - [AudioSet: An ontology and human-labelled dataset for audio events](https://research.google.com/pubs/pub45857.html), - ICASSP 2017 - -* Hershey, S. et. al., - [CNN Architectures for Large-Scale Audio Classification](https://research.google.com/pubs/pub45611.html), - ICASSP 2017 - -If you use any of our pre-trained models in your published research, we ask that -you cite [CNN Architectures for Large-Scale Audio Classification](https://research.google.com/pubs/pub45611.html). -If you use the AudioSet dataset or the released embeddings of AudioSet segments, -please cite -[AudioSet: An ontology and human-labelled dataset for audio events](https://research.google.com/pubs/pub45857.html). - -## Contact - -For general questions about AudioSet and these models, please use the -[audioset-users@googlegroups.com](https://groups.google.com/forum/#!forum/audioset-users) -mailing list. - -For technical problems with the released model and code, please open an issue on -the [tensorflow/models issue tracker](https://github.com/tensorflow/models/issues) -and __*assign to @plakal and @dpwe*__. 
Please note that because the issue tracker -is shared across all models released by Google, we won't be notified about an -issue unless you explicitly @-mention us (@plakal and @dpwe) or assign the issue -to us. - -## Credits - -Original authors and reviewers of the code in this package include (in -alphabetical order): - -* DAn Ellis -* Shawn Hershey -* Aren Jansen -* Manoj Plakal diff --git a/spaces/NMEX/rvc-hoyo-game/infer_pack/models.py b/spaces/NMEX/rvc-hoyo-game/infer_pack/models.py deleted file mode 100644 index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000 --- a/spaces/NMEX/rvc-hoyo-game/infer_pack/models.py +++ /dev/null @@ -1,982 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) 
- x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - 
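-        # apply the shared weight initialization from infer_pack.commons to the upsampling stack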
self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - 
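-            # voiced frames keep the sine plus light Gaussian jitter; unvoiced frames become noise at ~sine_amp/3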
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = 
np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, 
ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training 
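-    (This "sim" variant has no posterior encoder; the flow is applied in reverse
-    directly to the text-encoder output.)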
- """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y_lengths, ds - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - z_slice, ids_slice = commons.rand_slice_segments( - x, y_lengths, self.segment_size - ) - - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice - - def infer( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o, o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, 
fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/NanoT/demo2/app.py b/spaces/NanoT/demo2/app.py deleted file mode 100644 index 2fc93a216b7d20e6de1588699c1e5b8e94afd30d..0000000000000000000000000000000000000000 --- a/spaces/NanoT/demo2/app.py +++ /dev/null @@ -1,87 +0,0 @@ -import gradio as gr -import yolov5 -import numpy as np -from PIL import Image -from PIL import ImageDraw -import os -import requests - -model = yolov5.load('https://huggingface.co/keremberke/yolov5m-license-plate/resolve/main/best.pt') - -# set model parameters -model.conf = 0.40 # NMS confidence threshold -model.iou = 0.45 # NMS IoU threshold -model.agnostic = False # NMS class-agnostic -model.multi_label = False # NMS multiple labels per box -model.max_det = 1000 # maximum number of detections per image - -API_URL = "https://api-inference.huggingface.co/models/kha-white/manga-ocr-base" -headers = {"Authorization": f"Bearer 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"} - -def query(filename): - with open(filename, "rb") as f: - data = f.read() - response = requests.post(API_URL, headers=headers, data=data) - return response.json() - - -def crop(detections, original_image): - # Create a copy of the original image to draw bounding boxes - image_with_boxes = original_image.copy() - 
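-    # draw on a copy so the caller's original image stays untouched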
- # Get the detected predictions - predictions = detections.pred[0] - boxes = predictions[:, :4] # x1, y1, x2, y2 - scores = predictions[:, 4] - categories = predictions[:, 5] - - # Convert the NumPy array to a PIL Image - image_with_boxes = Image.fromarray(np.uint8(image_with_boxes)) - - # Get the ImageDraw object - draw = ImageDraw.Draw(image_with_boxes) - - for box, score, category in zip(boxes, scores, categories): - # Extract the bounding box coordinates - x_min, y_min, x_max, y_max = box - x_min, y_min, x_max, y_max = int(x_min.item()), int(y_min.item()), int(x_max.item()), int(y_max.item()) - cropped_image = image_with_boxes.crop((x_min,y_min,x_max,y_max)) - cropped_image.save('output_image.jpg') - - # Draw the bounding box rectangle - draw.rectangle([x_min, y_min, x_max, y_max], outline="red", width=2) - - # Optionally, add a label and confidence score near the bounding box - # label_text = f"Class {int(category)} - Score: {score:.2f}" - # draw.text((x_min, y_min), label_text, fill="red") - - return cropped_image - -def getNumber(img): - # perform inference - results = model(img, size=640) - -# inference with test time augmentation - results = model(img, augment=True) - - # parse results - predictions = results.pred[0] - boxes = predictions[:, :4] # x1, y1, x2, y2 - scores = predictions[:, 4] - categories = predictions[:, 5] - processed_img=crop(results, img) - output=query("output_image.jpg") - max_output_size = (640, 640) # Define the maximum size you want for the output image - #processed_img.thumbnail(max_output_size, Image.ANTIALIAS) - st=str(output[0]) - sp=st.split(':')[1].split('}')[0] - sdash=sp[2:] - sd=sdash[:-1] - return sd - -input_image = gr.inputs.Image() -gr.Interface(getNumber, inputs=input_image, outputs="text",examples=["image.jpg","image2.jpg"]).launch(debug=True) - -#link for the dataset: https://huggingface.co/datasets/keremberke/license-plate-object-detection - - diff --git a/spaces/NimaBoscarino/climategan/train.py b/spaces/NimaBoscarino/climategan/train.py deleted file mode 100644 index 1b0377c808df36b60534784596e37088ba236acb..0000000000000000000000000000000000000000 --- a/spaces/NimaBoscarino/climategan/train.py +++ /dev/null @@ -1,195 +0,0 @@ -import logging -import os -from pathlib import Path -from time import sleep, time - -import hydra -import yaml -from addict import Dict -from comet_ml import ExistingExperiment, Experiment -from omegaconf import OmegaConf - -from climategan.trainer import Trainer -from climategan.utils import ( - comet_kwargs, - copy_run_files, - env_to_path, - find_existing_training, - flatten_opts, - get_existing_comet_id, - get_git_branch, - get_git_revision_hash, - get_increased_path, - kill_job, - load_opts, - pprint, -) - -logging.basicConfig() -logging.getLogger().setLevel(logging.ERROR) - -hydra_config_path = Path(__file__).resolve().parent / "shared/trainer/config.yaml" - - -# requires hydra-core==0.11.3 and omegaconf==1.4.1 -@hydra.main(config_path=hydra_config_path, strict=False) -def main(opts): - """ - Opts prevalence: - 1. Load file specified in args.default (or shared/trainer/defaults.yaml - if none is provided) - 2. Update with file specified in args.config (or no update if none is provided) - 3. Update with parsed command-line arguments - - e.g. 
- `python train.py args.config=config/large-lr.yaml data.loaders.batch_size=10` - loads defaults, overrides with values in large-lr.yaml and sets batch_size to 10 - """ - - # ----------------------------- - # ----- Parse arguments ----- - # ----------------------------- - - hydra_opts = Dict(OmegaConf.to_container(opts)) - args = hydra_opts.pop("args", None) - auto_resumed = {} - - config_path = args.config - - if hydra_opts.train.resume: - out_ = str(env_to_path(hydra_opts.output_path)) - config_path = Path(out_) / "opts.yaml" - if not config_path.exists(): - config_path = None - print("WARNING: could not reuse the opts in {}".format(out_)) - - default = args.default or Path(__file__).parent / "shared/trainer/defaults.yaml" - - # ----------------------- - # ----- Load opts ----- - # ----------------------- - - opts = load_opts(config_path, default=default, commandline_opts=hydra_opts) - if args.resume: - opts.train.resume = True - - opts.jobID = os.environ.get("SLURM_JOBID") - opts.slurm_partition = os.environ.get("SLURM_JOB_PARTITION") - opts.output_path = str(env_to_path(opts.output_path)) - print("Config output_path:", opts.output_path) - - exp = comet_previous_id = None - - # ------------------------------- - # ----- Check output_path ----- - # ------------------------------- - - # Auto-continue if same slurm job ID (=job was requeued) - if not opts.train.resume and opts.train.auto_resume: - print("\n\nTrying to auto-resume...") - existing_path = find_existing_training(opts) - if existing_path is not None and existing_path.exists(): - auto_resumed["original output_path"] = str(opts.output_path) - auto_resumed["existing_path"] = str(existing_path) - opts.train.resume = True - opts.output_path = str(existing_path) - - # Still not resuming: creating new output path - if not opts.train.resume: - opts.output_path = str(get_increased_path(opts.output_path)) - Path(opts.output_path).mkdir(parents=True, exist_ok=True) - - # Copy the opts's sbatch_file to output_path - copy_run_files(opts) - # store git hash - opts.git_hash = get_git_revision_hash() - opts.git_branch = get_git_branch() - - if not args.no_comet: - # ---------------------------------- - # ----- Set Comet Experiment ----- - # ---------------------------------- - - if opts.train.resume: - # Is resuming: get existing comet exp id - assert Path(opts.output_path).exists(), "Output_path does not exist" - - comet_previous_id = get_existing_comet_id(opts.output_path) - # Continue existing experiment - if comet_previous_id is None: - print("WARNING could not retreive previous comet id") - print(f"from {opts.output_path}") - else: - print("Continuing previous experiment", comet_previous_id) - auto_resumed["continuing exp id"] = comet_previous_id - exp = ExistingExperiment( - previous_experiment=comet_previous_id, **comet_kwargs - ) - print("Comet Experiment resumed") - - if exp is None: - # Create new experiment - print("Starting new experiment") - exp = Experiment(project_name="climategan", **comet_kwargs) - exp.log_asset_folder( - str(Path(__file__).parent / "climategan"), - recursive=True, - log_file_name=True, - ) - exp.log_asset(str(Path(__file__))) - - # Log note - if args.note: - exp.log_parameter("note", args.note) - - # Merge and log tags - if args.comet_tags or opts.comet.tags: - tags = set([f"branch:{opts.git_branch}"]) - if args.comet_tags: - tags.update(args.comet_tags) - if opts.comet.tags: - tags.update(opts.comet.tags) - opts.comet.tags = list(tags) - print("Logging to comet.ml with tags", opts.comet.tags) - 
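- # Attach the merged tag set to the Comet experiment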
exp.add_tags(opts.comet.tags) - - # Log all opts - exp.log_parameters(flatten_opts(opts)) - if auto_resumed: - exp.log_text("\n".join(f"{k:20}: {v}" for k, v in auto_resumed.items())) - - # allow some time for comet to get its url - sleep(1) - - # Save comet exp url - url_path = get_increased_path(Path(opts.output_path) / "comet_url.txt") - with open(url_path, "w") as f: - f.write(exp.url) - - # Save config file - opts_path = get_increased_path(Path(opts.output_path) / "opts.yaml") - with (opts_path).open("w") as f: - yaml.safe_dump(opts.to_dict(), f) - - pprint("Running model in", opts.output_path) - - # ------------------- - # ----- Train ----- - # ------------------- - - trainer = Trainer(opts, comet_exp=exp, verbose=1) - trainer.logger.time.start_time = time() - trainer.setup() - trainer.train() - - # ----------------------------- - # ----- End of training ----- - # ----------------------------- - - pprint("Done training") - kill_job(opts.jobID) - - -if __name__ == "__main__": - - main() diff --git a/spaces/NoCrypt/mikuTTS/config.py b/spaces/NoCrypt/mikuTTS/config.py deleted file mode 100644 index 4038dad0ac30ba03b6271499f4e37bbc745a2032..0000000000000000000000000000000000000000 --- a/spaces/NoCrypt/mikuTTS/config.py +++ /dev/null @@ -1,115 +0,0 @@ -import argparse -import sys -import torch -from multiprocessing import cpu_count - - -class Config: - def __init__(self): - self.device = "cuda:0" - self.is_half = True - self.n_cpu = 0 - self.gpu_name = None - self.gpu_mem = None - ( - self.python_cmd, - self.listen_port, - self.iscolab, - self.noparallel, - self.noautoopen, - ) = self.arg_parse() - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - @staticmethod - def arg_parse() -> tuple: - exe = sys.executable or "python" - parser = argparse.ArgumentParser() - parser.add_argument("--port", type=int, default=7865, help="Listen port") - parser.add_argument("--pycmd", type=str, default=exe, help="Python command") - parser.add_argument("--colab", action="store_true", help="Launch in colab") - parser.add_argument( - "--noparallel", action="store_true", help="Disable parallel processing" - ) - parser.add_argument( - "--noautoopen", - action="store_true", - help="Do not open in browser automatically", - ) - cmd_opts = parser.parse_args() - - cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865 - - return ( - cmd_opts.pycmd, - cmd_opts.port, - cmd_opts.colab, - cmd_opts.noparallel, - cmd_opts.noautoopen, - ) - - # has_mps is only available in nightly pytorch (for now) and MasOS 12.3+. 
- # check `getattr` and try it for compatibility - @staticmethod - def has_mps() -> bool: - if not torch.backends.mps.is_available(): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - if ( - ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) - or "P40" in self.gpu_name.upper() - or "1060" in self.gpu_name - or "1070" in self.gpu_name - or "1080" in self.gpu_name - ): - print("Found GPU", self.gpu_name, ", force to fp32") - self.is_half = False - else: - print("Found GPU", self.gpu_name) - self.gpu_mem = int( - torch.cuda.get_device_properties(i_device).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - elif self.has_mps(): - print("No supported Nvidia GPU found, use MPS instead") - self.device = "mps" - self.is_half = False - else: - print("No supported Nvidia GPU found, use CPU instead") - self.device = "cpu" - self.is_half = False - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem != None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - - return x_pad, x_query, x_center, x_max diff --git a/spaces/OAOA/DifFace/models/fp16_util.py b/spaces/OAOA/DifFace/models/fp16_util.py deleted file mode 100644 index 23e0418153143200a718f56077b3360f30f4c663..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/models/fp16_util.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -Helpers to train with 16-bit precision. -""" - -import torch.nn as nn -from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors - - -def convert_module_to_f16(l): - """ - Convert primitive modules to float16. - """ - if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): - l.weight.data = l.weight.data.half() - l.bias.data = l.bias.data.half() - - -def convert_module_to_f32(l): - """ - Convert primitive modules to float32, undoing convert_module_to_f16(). - """ - if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): - l.weight.data = l.weight.data.float() - l.bias.data = l.bias.data.float() - - -def make_master_params(model_params): - """ - Copy model parameters into a (differently-shaped) list of full-precision - parameters. - """ - master_params = _flatten_dense_tensors( - [param.detach().float() for param in model_params] - ) - master_params = nn.Parameter(master_params) - master_params.requires_grad = True - return [master_params] - - -def model_grads_to_master_grads(model_params, master_params): - """ - Copy the gradients from the model parameters into the master parameters - from make_master_params(). - """ - master_params[0].grad = _flatten_dense_tensors( - [param.grad.data.detach().float() for param in model_params] - ) - - -def master_params_to_model_params(model_params, master_params): - """ - Copy the master parameter data back into the model parameters. - """ - # Without copying to a list, if a generator is passed, this will - # silently not copy any parameters. 
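- # (A generator such as model.parameters() would be exhausted while
- # building the unflattened master params, leaving zip() nothing to copy.)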
- model_params = list(model_params) - - for param, master_param in zip( - model_params, unflatten_master_params(model_params, master_params) - ): - param.detach().copy_(master_param) - - -def unflatten_master_params(model_params, master_params): - """ - Unflatten the master parameters to look like model_params. - """ - return _unflatten_dense_tensors(master_params[0].detach(), model_params) - - -def zero_grad(model_params): - for param in model_params: - # Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group - if param.grad is not None: - param.grad.detach_() - param.grad.zero_() diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/quantization/scalar/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/quantization/scalar/__init__.py deleted file mode 100644 index 143834f3d036780eb6844c82f0c6f2d10cfe2f61..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/quantization/scalar/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .utils import quantize_model_ # NOQA diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/new/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/new/README.md deleted file mode 100644 index 5fa0e97245d3ba6db69d11222261b0644960183d..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/new/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# Flashlight Decoder - -This script runs decoding for pre-trained speech recognition models. - -## Usage - -Assuming a few variables: - -```bash -checkpoint= -data= -lm_model= -lexicon= -``` - -Example usage for decoding a fine-tuned Wav2Vec model: - -```bash -python $FAIRSEQ_ROOT/examples/speech_recognition/new/infer.py --multirun \ - task=audio_pretraining \ - task.data=$data \ - task.labels=ltr \ - common_eval.path=$checkpoint \ - decoding.type=kenlm \ - decoding.lexicon=$lexicon \ - decoding.lmpath=$lm_model \ - dataset.gen_subset=dev_clean,dev_other,test_clean,test_other -``` - -Example usage for using Ax to sweep WER parameters (requires `pip install hydra-ax-sweeper`): - -```bash -python $FAIRSEQ_ROOT/examples/speech_recognition/new/infer.py --multirun \ - hydra/sweeper=ax \ - task=audio_pretraining \ - task.data=$data \ - task.labels=ltr \ - common_eval.path=$checkpoint \ - decoding.type=kenlm \ - decoding.lexicon=$lexicon \ - decoding.lmpath=$lm_model \ - dataset.gen_subset=dev_other -``` diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/distributed/module_proxy_wrapper.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/distributed/module_proxy_wrapper.py deleted file mode 100644 index fc2c6f8c718f2ac8ece308e50f7ba74a05474f4a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/distributed/module_proxy_wrapper.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from torch import nn - - -class ModuleProxyWrapper(nn.Module): - """ - Wrap a DistributedDataParallel module and forward requests for missing - attributes to the module wrapped by DDP (the twice-wrapped module). - Also forward calls to :func:`state_dict` and :func:`load_state_dict`. - - Usage:: - - module.xyz = "hello world" - wrapped_module = DistributedDataParallel(module, **ddp_args) - wrapped_module = ModuleProxyWrapper(wrapped_module) - assert wrapped_module.xyz == "hello world" - assert wrapped_module.state_dict().keys() == module.state_dict().keys() - - Args: - module (nn.Module): module to wrap - """ - - def __init__(self, module: nn.Module): - super().__init__() - assert hasattr(module, "module"), \ - "ModuleProxyWrapper expects input to wrap another module" - self.module = module - - def __getattr__(self, name): - """Forward missing attributes to twice-wrapped module.""" - try: - # defer to nn.Module's logic - return super().__getattr__(name) - except AttributeError: - try: - # forward to the once-wrapped module - return getattr(self.module, name) - except AttributeError: - # forward to the twice-wrapped module - return getattr(self.module.module, name) - - def state_dict(self, *args, **kwargs): - """Forward to the twice-wrapped module.""" - return self.module.module.state_dict(*args, **kwargs) - - def load_state_dict(self, *args, **kwargs): - """Forward to the twice-wrapped module.""" - return self.module.module.load_state_dict(*args, **kwargs) - - def forward(self, *args, **kwargs): - return self.module(*args, **kwargs) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/backtranslation/deduplicate_lines.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/backtranslation/deduplicate_lines.py deleted file mode 100644 index 50e458328c80b71c42a66d473381ca7e98d294da..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/backtranslation/deduplicate_lines.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
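-# Reads newline-delimited text from the given files (or stdin), keeps the
-# first occurrence of each line (keyed by md5), and writes the result to
-# stdout, e.g.: python deduplicate_lines.py --workers 10 corpus.txt > dedup.txt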
- -import argparse -import fileinput -import hashlib -import sys -from multiprocessing import Pool - - -def get_hashes_and_lines(raw_line): - hash = hashlib.md5(raw_line).hexdigest() - return hash, raw_line - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--workers", type=int, default=10) - parser.add_argument("files", nargs="*", help="input files") - args = parser.parse_args() - - seen = set() - with fileinput.input(args.files, mode="rb") as h: - pool = Pool(args.workers) - results = pool.imap_unordered(get_hashes_and_lines, h, 1000) - for i, (hash, raw_line) in enumerate(results): - if hash not in seen: - seen.add(hash) - sys.stdout.buffer.write(raw_line) - if i % 1000000 == 0: - print(i, file=sys.stderr, end="", flush=True) - elif i % 100000 == 0: - print(".", file=sys.stderr, end="", flush=True) - print(file=sys.stderr, flush=True) - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-vqa/utils/cider/pyciderevalcap/cider/cider_scorer.py b/spaces/OFA-Sys/OFA-vqa/utils/cider/pyciderevalcap/cider/cider_scorer.py deleted file mode 100644 index d7f9505916f2210617cc529bf3c05acfa06d5a62..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/utils/cider/pyciderevalcap/cider/cider_scorer.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/env python -# Tsung-Yi Lin -# Ramakrishna Vedantam -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import copy -import six -from six.moves import cPickle -from collections import defaultdict -import numpy as np -import math -import os - -def precook(s, n=4, out=False): - """ - Takes a string as input and returns an object that can be given to - either cook_refs or cook_test. This is optional: cook_refs and cook_test - can take string arguments as well. - :param s: string : sentence to be converted into ngrams - :param n: int : number of ngrams for which representation is calculated - :return: term frequency vector for occuring ngrams - """ - words = s.split() - counts = defaultdict(int) - for k in range(1,n+1): - for i in range(len(words)-k+1): - ngram = tuple(words[i:i+k]) - counts[ngram] += 1 - return counts - -def cook_refs(refs, n=4): ## lhuang: oracle will call with "average" - '''Takes a list of reference sentences for a single segment - and returns an object that encapsulates everything that BLEU - needs to know about them. - :param refs: list of string : reference sentences for some image - :param n: int : number of ngrams for which (ngram) representation is calculated - :return: result (list of dict) - ''' - return [precook(ref, n) for ref in refs] - -def cook_test(test, n=4): - '''Takes a test sentence and returns an object that - encapsulates everything that BLEU needs to know about it. - :param test: list of string : hypothesis sentence for some image - :param n: int : number of ngrams for which (ngram) representation is calculated - :return: result (dict) - ''' - return precook(test, n, True) - -class CiderScorer(object): - """CIDEr scorer. 
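- Scores are tf-idf weighted n-gram cosine similarities (n = 1..4),
- averaged over n and over the references, then scaled by 10.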
- """ - - def copy(self): - ''' copy the refs.''' - new = CiderScorer(n=self.n) - new.ctest = copy.copy(self.ctest) - new.crefs = copy.copy(self.crefs) - return new - - def __init__(self, df_mode="corpus", test=None, refs=None, n=4, sigma=6.0): - ''' singular instance ''' - self.n = n - self.sigma = sigma - self.crefs = [] - self.ctest = [] - self.df_mode = df_mode - self.ref_len = None - if self.df_mode != "corpus": - pkl_file = cPickle.load(open(os.path.join('data', df_mode + '.p'),'rb'), **(dict(encoding='latin1') if six.PY3 else {})) - self.ref_len = np.log(float(pkl_file['ref_len'])) - self.document_frequency = pkl_file['document_frequency'] - self.cook_append(test, refs) - - def clear(self): - self.crefs = [] - self.ctest = [] - - def cook_append(self, test, refs): - '''called by constructor and __iadd__ to avoid creating new instances.''' - - if refs is not None: - self.crefs.append(cook_refs(refs)) - if test is not None: - self.ctest.append(cook_test(test)) ## N.B.: -1 - else: - self.ctest.append(None) # lens of crefs and ctest have to match - - def size(self): - assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest)) - return len(self.crefs) - - def __iadd__(self, other): - '''add an instance (e.g., from another sentence).''' - - if type(other) is tuple: - ## avoid creating new CiderScorer instances - self.cook_append(other[0], other[1]) - else: - self.ctest.extend(other.ctest) - self.crefs.extend(other.crefs) - - return self - def compute_doc_freq(self): - ''' - Compute term frequency for reference data. - This will be used to compute idf (inverse document frequency later) - The term frequency is stored in the object - :return: None - ''' - for refs in self.crefs: - # refs, k ref captions of one image - for ngram in set([ngram for ref in refs for (ngram,count) in ref.items()]): - self.document_frequency[ngram] += 1 - # maxcounts[ngram] = max(maxcounts.get(ngram,0), count) - - def compute_cider(self): - def counts2vec(cnts): - """ - Function maps counts of ngram to vector of tfidf weights. - The function returns vec, an array of dictionary that store mapping of n-gram and tf-idf weights. - The n-th entry of array denotes length of n-grams. - :param cnts: - :return: vec (array of dict), norm (array of float), length (int) - """ - vec = [defaultdict(float) for _ in range(self.n)] - length = 0 - norm = [0.0 for _ in range(self.n)] - for (ngram,term_freq) in cnts.items(): - # give word count 1 if it doesn't appear in reference corpus - df = np.log(max(1.0, self.document_frequency[ngram])) - # ngram index - n = len(ngram)-1 - # tf (term_freq) * idf (precomputed idf) for n-grams - vec[n][ngram] = float(term_freq)*(self.ref_len - df) - # compute norm for the vector. the norm will be used for - # computing similarity - norm[n] += pow(vec[n][ngram], 2) - - if n == 1: - length += term_freq - norm = [np.sqrt(n) for n in norm] - return vec, norm, length - - def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref): - ''' - Compute the cosine similarity of two vectors. 
- :param vec_hyp: array of dictionary for vector corresponding to hypothesis - :param vec_ref: array of dictionary for vector corresponding to reference - :param norm_hyp: array of float for vector corresponding to hypothesis - :param norm_ref: array of float for vector corresponding to reference - :param length_hyp: int containing length of hypothesis - :param length_ref: int containing length of reference - :return: array of score for each n-grams cosine similarity - ''' - delta = float(length_hyp - length_ref) - # measure consine similarity - val = np.array([0.0 for _ in range(self.n)]) - for n in range(self.n): - # ngram - for (ngram,count) in vec_hyp[n].items(): - val[n] += vec_hyp[n][ngram] * vec_ref[n][ngram] - - if (norm_hyp[n] != 0) and (norm_ref[n] != 0): - val[n] /= (norm_hyp[n]*norm_ref[n]) - - assert(not math.isnan(val[n])) - return val - - # compute log reference length - if self.df_mode == "corpus": - self.ref_len = np.log(float(len(self.crefs))) - - scores = [] - for test, refs in zip(self.ctest, self.crefs): - # compute vector for test captions - vec, norm, length = counts2vec(test) - # compute vector for ref captions - score = np.array([0.0 for _ in range(self.n)]) - for ref in refs: - vec_ref, norm_ref, length_ref = counts2vec(ref) - score += sim(vec, vec_ref, norm, norm_ref, length, length_ref) - # change by vrama91 - mean of ngram scores, instead of sum - score_avg = np.mean(score) - # divide by number of references - score_avg /= len(refs) - # multiply score by 10 - score_avg *= 10.0 - # append score of an image to the score list - scores.append(score_avg) - return scores - - def compute_score(self, option=None, verbose=0): - # compute idf - if self.df_mode == "corpus": - self.document_frequency = defaultdict(float) - self.compute_doc_freq() - # assert to check document frequency - assert(len(self.ctest) >= max(self.document_frequency.values())) - # import json for now and write the corresponding files - # compute cider score - score = self.compute_cider() - # debug - # print score - return np.mean(np.array(score)), np.array(score) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/grit/modeling/backbone/utils.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/grit/modeling/backbone/utils.py deleted file mode 100644 index e71db21f1223c87cceeb422a70888f7bac42bb18..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/grit/modeling/backbone/utils.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# This code is from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/utils.py -import math -import torch -import torch.nn as nn -import torch.nn.functional as F - -__all__ = [ - "window_partition", - "window_unpartition", - "add_decomposed_rel_pos", - "get_abs_pos", - "PatchEmbed", -] - -def window_partition(x, window_size): - """ - Partition into non-overlapping windows with padding if needed. - Args: - x (tensor): input tokens with [B, H, W, C]. - window_size (int): window size. - - Returns: - windows: windows after partition with [B * num_windows, window_size, window_size, C]. 
- (Hp, Wp): padded height and width before partition - """ - B, H, W, C = x.shape - - pad_h = (window_size - H % window_size) % window_size - pad_w = (window_size - W % window_size) % window_size - if pad_h > 0 or pad_w > 0: - x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) - Hp, Wp = H + pad_h, W + pad_w - - x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows, (Hp, Wp) - - -def window_unpartition(windows, window_size, pad_hw, hw): - """ - Window unpartition into original sequences and removing padding. - Args: - x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. - window_size (int): window size. - pad_hw (Tuple): padded height and width (Hp, Wp). - hw (Tuple): original height and width (H, W) before padding. - - Returns: - x: unpartitioned sequences with [B, H, W, C]. - """ - Hp, Wp = pad_hw - H, W = hw - B = windows.shape[0] // (Hp * Wp // window_size // window_size) - x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) - - if Hp > H or Wp > W: - x = x[:, :H, :W, :].contiguous() - return x - - -def get_rel_pos(q_size, k_size, rel_pos): - """ - Get relative positional embeddings according to the relative positions of - query and key sizes. - Args: - q_size (int): size of query q. - k_size (int): size of key k. - rel_pos (Tensor): relative position embeddings (L, C). - - Returns: - Extracted positional embeddings according to relative positions. - """ - max_rel_dist = int(2 * max(q_size, k_size) - 1) - # Interpolate rel pos if needed. - if rel_pos.shape[0] != max_rel_dist: - # Interpolate rel pos. - rel_pos_resized = F.interpolate( - rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), - size=max_rel_dist, - mode="linear", - ) - rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) - else: - rel_pos_resized = rel_pos - - # Scale the coords with short length if shapes for q and k are different. - q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) - k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) - relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) - - return rel_pos_resized[relative_coords.long()] - - -def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size): - """ - Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. - https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 - Args: - attn (Tensor): attention map. - q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). - rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. - rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. - q_size (Tuple): spatial sequence size of query q with (q_h, q_w). - k_size (Tuple): spatial sequence size of key k with (k_h, k_w). - - Returns: - attn (Tensor): attention map with added relative positional embeddings. 
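- The height and width relative-position terms are computed separately
- and broadcast onto the (q_h * q_w, k_h * k_w) attention logits.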
- """ - q_h, q_w = q_size - k_h, k_w = k_size - Rh = get_rel_pos(q_h, k_h, rel_pos_h) - Rw = get_rel_pos(q_w, k_w, rel_pos_w) - - B, _, dim = q.shape - r_q = q.reshape(B, q_h, q_w, dim) - rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) - rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) - - attn = ( - attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] - ).view(B, q_h * q_w, k_h * k_w) - - return attn - - -def get_abs_pos(abs_pos, has_cls_token, hw): - """ - Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token - dimension for the original embeddings. - Args: - abs_pos (Tensor): absolute positional embeddings with (1, num_position, C). - has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token. - hw (Tuple): size of input image tokens. - - Returns: - Absolute positional embeddings after processing with shape (1, H, W, C) - """ - h, w = hw - if has_cls_token: - abs_pos = abs_pos[:, 1:] - xy_num = abs_pos.shape[1] - size = int(math.sqrt(xy_num)) - assert size * size == xy_num - - if size != h or size != w: - new_abs_pos = F.interpolate( - abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2), - size=(h, w), - mode="bicubic", - align_corners=False, - ) - - return new_abs_pos.permute(0, 2, 3, 1) - else: - return abs_pos.reshape(1, h, w, -1) - - -class PatchEmbed(nn.Module): - """ - Image to Patch Embedding. - """ - - def __init__( - self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768 - ): - """ - Args: - kernel_size (Tuple): kernel size of the projection layer. - stride (Tuple): stride of the projection layer. - padding (Tuple): padding size of the projection layer. - in_chans (int): Number of input image channels. - embed_dim (int): embed_dim (int): Patch embedding dimension. - """ - super().__init__() - - self.proj = nn.Conv2d( - in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding - ) - - def forward(self, x): - x = self.proj(x) - # B C H W -> B H W C - x = x.permute(0, 2, 3, 1) - return x diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/structures/test_masks.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/structures/test_masks.py deleted file mode 100644 index 7991eb0b35724f2f2f402d788a273d68b7cad5f2..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/structures/test_masks.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import unittest -import torch - -from detectron2.structures.masks import BitMasks, PolygonMasks, polygons_to_bitmask - - -class TestBitMask(unittest.TestCase): - def test_get_bounding_box(self): - masks = torch.tensor( - [ - [ - [False, False, False, True], - [False, False, True, True], - [False, True, True, False], - [False, True, True, False], - ], - [ - [False, False, False, False], - [False, False, True, False], - [False, True, True, False], - [False, True, True, False], - ], - torch.zeros(4, 4), - ] - ) - bitmask = BitMasks(masks) - box_true = torch.tensor([[1, 0, 4, 4], [1, 1, 3, 4], [0, 0, 0, 0]], dtype=torch.float32) - box = bitmask.get_bounding_boxes() - self.assertTrue(torch.all(box.tensor == box_true).item()) - - for box in box_true: - poly = box[[0, 1, 2, 1, 2, 3, 0, 3]].numpy() - mask = polygons_to_bitmask([poly], 4, 4) - reconstruct_box = BitMasks(mask[None, :, :]).get_bounding_boxes()[0].tensor - self.assertTrue(torch.all(box == reconstruct_box).item()) - - reconstruct_box = PolygonMasks([[poly]]).get_bounding_boxes()[0].tensor - self.assertTrue(torch.all(box == reconstruct_box).item()) - - def test_from_empty_polygons(self): - masks = BitMasks.from_polygon_masks([], 100, 100) - self.assertEqual(masks.tensor.shape, (0, 100, 100)) - - def test_getitem(self): - masks = BitMasks(torch.ones(3, 10, 10)) - self.assertEqual(masks[1].tensor.shape, (1, 10, 10)) - self.assertEqual(masks[1:3].tensor.shape, (2, 10, 10)) - self.assertEqual(masks[torch.tensor([True, False, False])].tensor.shape, (1, 10, 10)) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/PHZane/emrwa/tokenizations/tokenization_bert.py b/spaces/PHZane/emrwa/tokenizations/tokenization_bert.py deleted file mode 100644 index 75d7a3c303dcfa0c18958807a30e02f49072aeb6..0000000000000000000000000000000000000000 --- a/spaces/PHZane/emrwa/tokenizations/tokenization_bert.py +++ /dev/null @@ -1,436 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Tokenization classes.""" - -from __future__ import absolute_import, division, print_function, unicode_literals - -import collections -import logging -import os -import unicodedata -from io import open - -from transformers.tokenization_utils import PreTrainedTokenizer - -logger = logging.getLogger(__name__) - -VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} - -PRETRAINED_VOCAB_FILES_MAP = { - 'vocab_file': - { - 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", - 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", - 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", - 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", - 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", - 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", - 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", - 'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt", - 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt", - 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt", - 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt", - 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt", - 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt", - } -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - 'bert-base-uncased': 512, - 'bert-large-uncased': 512, - 'bert-base-cased': 512, - 'bert-large-cased': 512, - 'bert-base-multilingual-uncased': 512, - 'bert-base-multilingual-cased': 512, - 'bert-base-chinese': 512, - 'bert-base-german-cased': 512, - 'bert-large-uncased-whole-word-masking': 512, - 'bert-large-cased-whole-word-masking': 512, - 'bert-large-uncased-whole-word-masking-finetuned-squad': 512, - 'bert-large-cased-whole-word-masking-finetuned-squad': 512, - 'bert-base-cased-finetuned-mrpc': 512, -} - -def load_vocab(vocab_file): - """Loads a vocabulary file into a dictionary.""" - vocab = collections.OrderedDict() - with open(vocab_file, "r", encoding="utf-8") as reader: - tokens = reader.readlines() - for index, token in enumerate(tokens): - token = token.rstrip('\n') - vocab[token] = index - return vocab - - -def whitespace_tokenize(text): - """Runs basic whitespace cleaning and splitting on a piece of text.""" - text = text.strip() - if not text: - return [] - tokens = text.split() - return tokens - - -class BertTokenizer(PreTrainedTokenizer): - r""" - Constructs a BertTokenizer. - :class:`~pytorch_pretrained_bert.BertTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece - - Args: - vocab_file: Path to a one-wordpiece-per-line vocabulary file - do_lower_case: Whether to lower case the input. 
Only has an effect when do_wordpiece_only=False - do_basic_tokenize: Whether to do basic tokenization before wordpiece. - max_len: An artificial maximum length to truncate tokenized_doupo sequences to; Effective maximum length is always the - minimum of this value (if specified) and the underlying BERT model's sequence length. - never_split: List of tokens which will never be split during tokenization. Only has an effect when - do_wordpiece_only=False - """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - - def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, - unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", - mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs): - """Constructs a BertTokenizer. - - Args: - **vocab_file**: Path to a one-wordpiece-per-line vocabulary file - **do_lower_case**: (`optional`) boolean (default True) - Whether to lower case the input - Only has an effect when do_basic_tokenize=True - **do_basic_tokenize**: (`optional`) boolean (default True) - Whether to do basic tokenization before wordpiece. - **never_split**: (`optional`) list of string - List of tokens which will never be split during tokenization. - Only has an effect when do_basic_tokenize=True - **tokenize_chinese_chars**: (`optional`) boolean (default True) - Whether to tokenize Chinese characters. - This should likely be desactivated for Japanese: - see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328 - """ - super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, - pad_token=pad_token, cls_token=cls_token, - mask_token=mask_token, **kwargs) - if not os.path.isfile(vocab_file): - raise ValueError( - "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " - "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) - self.vocab = load_vocab(vocab_file) - self.ids_to_tokens = collections.OrderedDict( - [(ids, tok) for tok, ids in self.vocab.items()]) - self.do_basic_tokenize = do_basic_tokenize - if do_basic_tokenize: - self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, - never_split=never_split, - tokenize_chinese_chars=tokenize_chinese_chars) - self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) - - @property - def vocab_size(self): - return len(self.vocab) - - def _tokenize(self, text): - split_tokens = [] - if self.do_basic_tokenize: - for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): - for sub_token in self.wordpiece_tokenizer.tokenize(token): - split_tokens.append(sub_token) - else: - split_tokens = self.wordpiece_tokenizer.tokenize(text) - return split_tokens - - def _convert_token_to_id(self, token): - """ Converts a token (str/unicode) in an id using the vocab. """ - return self.vocab.get(token, self.vocab.get(self.unk_token)) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (string/unicode) using the vocab.""" - return self.ids_to_tokens.get(index, self.unk_token) - - def convert_tokens_to_string(self, tokens): - """ Converts a sequence of tokens (string) in a single string. 
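- For example, ["un", "##aff", "##able"] becomes "unaffable".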
""" - out_string = ' '.join(tokens).replace(' ##', '').strip() - return out_string - - def save_vocabulary(self, vocab_path): - """Save the tokenizer vocabulary to a directory or file.""" - index = 0 - if os.path.isdir(vocab_path): - vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file']) - with open(vocab_file, "w", encoding="utf-8") as writer: - for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): - if index != token_index: - logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive." - " Please check that the vocabulary is not corrupted!".format(vocab_file)) - index = token_index - writer.write(token + u'\n') - index += 1 - return (vocab_file,) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): - """ Instantiate a BertTokenizer from pre-trained vocabulary files. - """ - if pretrained_model_name_or_path in PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES: - if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True): - logger.warning("The pre-trained model you are loading is a cased model but you have not set " - "`do_lower_case` to False. We are setting `do_lower_case=False` for you but " - "you may want to check this behavior.") - kwargs['do_lower_case'] = False - elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True): - logger.warning("The pre-trained model you are loading is an uncased model but you have set " - "`do_lower_case` to False. We are setting `do_lower_case=True` for you " - "but you may want to check this behavior.") - kwargs['do_lower_case'] = True - - return super(BertTokenizer, cls)._from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) - - -class BasicTokenizer(object): - """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" - - def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True): - """ Constructs a BasicTokenizer. - - Args: - **do_lower_case**: Whether to lower case the input. - **never_split**: (`optional`) list of str - Kept for backward compatibility purposes. - Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) - List of token not to split. - **tokenize_chinese_chars**: (`optional`) boolean (default True) - Whether to tokenize Chinese characters. - This should likely be desactivated for Japanese: - see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328 - """ - if never_split is None: - never_split = [] - self.do_lower_case = do_lower_case - self.never_split = never_split - self.tokenize_chinese_chars = tokenize_chinese_chars - - def tokenize(self, text, never_split=None): - """ Basic Tokenization of a piece of text. - Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. - - Args: - **never_split**: (`optional`) list of str - Kept for backward compatibility purposes. - Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) - List of token not to split. - """ - never_split = self.never_split + (never_split if never_split is not None else []) - text = self._clean_text(text) - # This was added on November 1st, 2018 for the multilingual and Chinese - # models. 
This is also applied to the English models now, but it doesn't - # matter since the English models were not trained on any Chinese data - # and generally don't have any Chinese data in them (there are Chinese - # characters in the vocabulary because Wikipedia does have some Chinese - # words in the English Wikipedia.). - if self.tokenize_chinese_chars: - text = self._tokenize_chinese_chars(text) - orig_tokens = whitespace_tokenize(text) - split_tokens = [] - for token in orig_tokens: - if self.do_lower_case and token not in never_split: - token = token.lower() - token = self._run_strip_accents(token) - split_tokens.extend(self._run_split_on_punc(token)) - - output_tokens = whitespace_tokenize(" ".join(split_tokens)) - return output_tokens - - def _run_strip_accents(self, text): - """Strips accents from a piece of text.""" - text = unicodedata.normalize("NFD", text) - output = [] - for char in text: - cat = unicodedata.category(char) - if cat == "Mn": - continue - output.append(char) - return "".join(output) - - def _run_split_on_punc(self, text, never_split=None): - """Splits punctuation on a piece of text.""" - if never_split is not None and text in never_split: - return [text] - chars = list(text) - i = 0 - start_new_word = True - output = [] - while i < len(chars): - char = chars[i] - if _is_punctuation(char): - output.append([char]) - start_new_word = True - else: - if start_new_word: - output.append([]) - start_new_word = False - output[-1].append(char) - i += 1 - - return ["".join(x) for x in output] - - def _tokenize_chinese_chars(self, text): - """Adds whitespace around any CJK character.""" - output = [] - for char in text: - cp = ord(char) - if self._is_chinese_char(cp) or char.isdigit(): - output.append(" ") - output.append(char) - output.append(" ") - else: - output.append(char) - return "".join(output) - - def _is_chinese_char(self, cp): - """Checks whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # despite its name. The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like the all of the other languages. - if ((cp >= 0x4E00 and cp <= 0x9FFF) or # - (cp >= 0x3400 and cp <= 0x4DBF) or # - (cp >= 0x20000 and cp <= 0x2A6DF) or # - (cp >= 0x2A700 and cp <= 0x2B73F) or # - (cp >= 0x2B740 and cp <= 0x2B81F) or # - (cp >= 0x2B820 and cp <= 0x2CEAF) or - (cp >= 0xF900 and cp <= 0xFAFF) or # - (cp >= 0x2F800 and cp <= 0x2FA1F)): # - return True - - return False - - def _clean_text(self, text): - """Performs invalid character removal and whitespace cleanup on text.""" - output = [] - for char in text: - cp = ord(char) - if cp == 0 or cp == 0xfffd or _is_control(char): - continue - if _is_whitespace(char): - output.append(" ") - else: - output.append(char) - return "".join(output) - - -class WordpieceTokenizer(object): - """Runs WordPiece tokenization.""" - - def __init__(self, vocab, unk_token, max_input_chars_per_word=100): - self.vocab = vocab - self.unk_token = unk_token - self.max_input_chars_per_word = max_input_chars_per_word - - def tokenize(self, text): - """Tokenizes a piece of text into its word pieces. 
- - This uses a greedy longest-match-first algorithm to perform tokenization - using the given vocabulary. - - For example: - input = "unaffable" - output = ["un", "##aff", "##able"] - - Args: - text: A single token or whitespace separated tokens. This should have - already been passed through `BasicTokenizer`. - - Returns: - A list of wordpiece tokens. - """ - - output_tokens = [] - for token in whitespace_tokenize(text): - chars = list(token) - if len(chars) > self.max_input_chars_per_word: - output_tokens.append(self.unk_token) - continue - - is_bad = False - start = 0 - sub_tokens = [] - while start < len(chars): - end = len(chars) - cur_substr = None - while start < end: - substr = "".join(chars[start:end]) - if start > 0: - substr = "##" + substr - if substr in self.vocab: - cur_substr = substr - break - end -= 1 - if cur_substr is None: - is_bad = True - break - sub_tokens.append(cur_substr) - start = end - - if is_bad: - output_tokens.append(self.unk_token) - else: - output_tokens.extend(sub_tokens) - return output_tokens - - -def _is_whitespace(char): - """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them - # as whitespace since they are generally considered as such. - if char == " " or char == "\t" or char == "\n" or char == "\r": - return True - cat = unicodedata.category(char) - if cat == "Zs": - return True - return False - - -def _is_control(char): - """Checks whether `chars` is a control character.""" - # These are technically control characters but we count them as whitespace - # characters. - if char == "\t" or char == "\n" or char == "\r": - return False - cat = unicodedata.category(char) - if cat.startswith("C"): - return True - return False - - -def _is_punctuation(char): - """Checks whether `chars` is a punctuation character.""" - cp = ord(char) - # We treat all non-letter/number ASCII as punctuation. - # Characters such as "^", "$", and "`" are not in the Unicode - # Punctuation class but we treat them as punctuation anyways, for - # consistency. - if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or - (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): - return True - cat = unicodedata.category(char) - if cat.startswith("P"): - return True - return False diff --git a/spaces/PKaushik/humandetect/yolov6/models/efficientrep.py b/spaces/PKaushik/humandetect/yolov6/models/efficientrep.py deleted file mode 100644 index 4cb9f1ca431b466af460212becec3b0e7ea15d5c..0000000000000000000000000000000000000000 --- a/spaces/PKaushik/humandetect/yolov6/models/efficientrep.py +++ /dev/null @@ -1,102 +0,0 @@ -from torch import nn -from yolov6.layers.common import RepVGGBlock, RepBlock, SimSPPF - - -class EfficientRep(nn.Module): - '''EfficientRep Backbone - EfficientRep is handcrafted by hardware-aware neural network design. - With rep-style struct, EfficientRep is friendly to high-computation hardware(e.g. GPU). 
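- The forward pass returns the stride-8/16/32 feature maps (the outputs
- of ERBlock_3/4/5) for consumption by the detection neck.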
- ''' - - def __init__( - self, - in_channels=3, - channels_list=None, - num_repeats=None, - ): - super().__init__() - - assert channels_list is not None - assert num_repeats is not None - - self.stem = RepVGGBlock( - in_channels=in_channels, - out_channels=channels_list[0], - kernel_size=3, - stride=2 - ) - - self.ERBlock_2 = nn.Sequential( - RepVGGBlock( - in_channels=channels_list[0], - out_channels=channels_list[1], - kernel_size=3, - stride=2 - ), - RepBlock( - in_channels=channels_list[1], - out_channels=channels_list[1], - n=num_repeats[1] - ) - ) - - self.ERBlock_3 = nn.Sequential( - RepVGGBlock( - in_channels=channels_list[1], - out_channels=channels_list[2], - kernel_size=3, - stride=2 - ), - RepBlock( - in_channels=channels_list[2], - out_channels=channels_list[2], - n=num_repeats[2] - ) - ) - - self.ERBlock_4 = nn.Sequential( - RepVGGBlock( - in_channels=channels_list[2], - out_channels=channels_list[3], - kernel_size=3, - stride=2 - ), - RepBlock( - in_channels=channels_list[3], - out_channels=channels_list[3], - n=num_repeats[3] - ) - ) - - self.ERBlock_5 = nn.Sequential( - RepVGGBlock( - in_channels=channels_list[3], - out_channels=channels_list[4], - kernel_size=3, - stride=2, - ), - RepBlock( - in_channels=channels_list[4], - out_channels=channels_list[4], - n=num_repeats[4] - ), - SimSPPF( - in_channels=channels_list[4], - out_channels=channels_list[4], - kernel_size=5 - ) - ) - - def forward(self, x): - - outputs = [] - x = self.stem(x) - x = self.ERBlock_2(x) - x = self.ERBlock_3(x) - outputs.append(x) - x = self.ERBlock_4(x) - outputs.append(x) - x = self.ERBlock_5(x) - outputs.append(x) - - return tuple(outputs) diff --git a/spaces/PaddlePaddle/resnet_v2_34_imagenet/README.md b/spaces/PaddlePaddle/resnet_v2_34_imagenet/README.md deleted file mode 100644 index 569455633ab89e244b75479a1293f6f060436824..0000000000000000000000000000000000000000 --- a/spaces/PaddlePaddle/resnet_v2_34_imagenet/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Resnet_v2_34_imagenet -emoji: 🌖 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 2.8.12 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/parallel/distributed_deprecated.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/parallel/distributed_deprecated.py deleted file mode 100644 index 676937a2085d4da20fa87923041a200fca6214eb..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/parallel/distributed_deprecated.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
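-# A deprecated DistributedDataParallel wrapper that synchronizes parameters
-# and buffers manually by broadcasting them in fixed-size flattened buckets.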
-import torch -import torch.distributed as dist -import torch.nn as nn -from torch._utils import (_flatten_dense_tensors, _take_tensors, - _unflatten_dense_tensors) - -from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version -from .registry import MODULE_WRAPPERS -from .scatter_gather import scatter_kwargs - - -@MODULE_WRAPPERS.register_module() -class MMDistributedDataParallel(nn.Module): - - def __init__(self, - module, - dim=0, - broadcast_buffers=True, - bucket_cap_mb=25): - super(MMDistributedDataParallel, self).__init__() - self.module = module - self.dim = dim - self.broadcast_buffers = broadcast_buffers - - self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024 - self._sync_params() - - def _dist_broadcast_coalesced(self, tensors, buffer_size): - for tensors in _take_tensors(tensors, buffer_size): - flat_tensors = _flatten_dense_tensors(tensors) - dist.broadcast(flat_tensors, 0) - for tensor, synced in zip( - tensors, _unflatten_dense_tensors(flat_tensors, tensors)): - tensor.copy_(synced) - - def _sync_params(self): - module_states = list(self.module.state_dict().values()) - if len(module_states) > 0: - self._dist_broadcast_coalesced(module_states, - self.broadcast_bucket_size) - if self.broadcast_buffers: - if (TORCH_VERSION != 'parrots' - and digit_version(TORCH_VERSION) < digit_version('1.0')): - buffers = [b.data for b in self.module._all_buffers()] - else: - buffers = [b.data for b in self.module.buffers()] - if len(buffers) > 0: - self._dist_broadcast_coalesced(buffers, - self.broadcast_bucket_size) - - def scatter(self, inputs, kwargs, device_ids): - return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) - - def forward(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs, - [torch.cuda.current_device()]) - return self.module(*inputs[0], **kwargs[0]) - - def train_step(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs, - [torch.cuda.current_device()]) - output = self.module.train_step(*inputs[0], **kwargs[0]) - return output - - def val_step(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs, - [torch.cuda.current_device()]) - output = self.module.val_step(*inputs[0], **kwargs[0]) - return output diff --git a/spaces/RMXK/RVC_HFF/guidml.py b/spaces/RMXK/RVC_HFF/guidml.py deleted file mode 100644 index aa35e9f8e3386bfec61fc9ad6f807b458ab35882..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/guidml.py +++ /dev/null @@ -1,710 +0,0 @@ -""" -0416后的更新: - 引入config中half - 重建npy而不用填写 - v2支持 - 无f0模型支持 - 修复 - - int16: - 增加无索引支持 - f0算法改harvest(怎么看就只有这个会影响CPU占用),但是不这么改效果不好 -""" -import os, sys, traceback, re - -import json - -now_dir = os.getcwd() -sys.path.append(now_dir) -from configs.config import Config - -Config = Config() - -import torch_directml -import PySimpleGUI as sg -import sounddevice as sd -import noisereduce as nr -import numpy as np -from fairseq import checkpoint_utils -import librosa, torch, pyworld, faiss, time, threading -import torch.nn.functional as F -import torchaudio.transforms as tat -import scipy.signal as signal - - -# import matplotlib.pyplot as plt -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from i18n import I18nAuto - -i18n = I18nAuto() -device = torch_directml.device(torch_directml.default_device()) -current_dir = os.getcwd() - - -class RVC: - def __init__( - self, key, hubert_path, pth_path, index_path, npy_path, index_rate - 
-    ) -> None:
-        """
-        Initialize the realtime voice-conversion pipeline.
-        """
-        try:
-            self.f0_up_key = key
-            self.time_step = 160 / 16000 * 1000
-            self.f0_min = 50
-            self.f0_max = 1100
-            self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
-            self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
-            self.sr = 16000
-            self.window = 160
-            if index_rate != 0:
-                self.index = faiss.read_index(index_path)
-                # self.big_npy = np.load(npy_path)
-                self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
-                print("index search enabled")
-            self.index_rate = index_rate
-            model_path = hubert_path
-            print("load model(s) from {}".format(model_path))
-            models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
-                [model_path],
-                suffix="",
-            )
-            self.model = models[0]
-            self.model = self.model.to(device)
-            if Config.is_half:
-                self.model = self.model.half()
-            else:
-                self.model = self.model.float()
-            self.model.eval()
-            cpt = torch.load(pth_path, map_location="cpu")
-            self.tgt_sr = cpt["config"][-1]
-            cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-            self.if_f0 = cpt.get("f0", 1)
-            self.version = cpt.get("version", "v1")
-            if self.version == "v1":
-                if self.if_f0 == 1:
-                    self.net_g = SynthesizerTrnMs256NSFsid(
-                        *cpt["config"], is_half=Config.is_half
-                    )
-                else:
-                    self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-            elif self.version == "v2":
-                if self.if_f0 == 1:
-                    self.net_g = SynthesizerTrnMs768NSFsid(
-                        *cpt["config"], is_half=Config.is_half
-                    )
-                else:
-                    self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
-            del self.net_g.enc_q
-            print(self.net_g.load_state_dict(cpt["weight"], strict=False))
-            self.net_g.eval().to(device)
-            if Config.is_half:
-                self.net_g = self.net_g.half()
-            else:
-                self.net_g = self.net_g.float()
-        except Exception:
-            print(traceback.format_exc())
-
-    def get_f0(self, x, f0_up_key, inp_f0=None):
-        x_pad = 1
-        f0_min = 50
-        f0_max = 1100
-        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-        f0, t = pyworld.harvest(
-            x.astype(np.double),
-            fs=self.sr,
-            f0_ceil=f0_max,
-            f0_floor=f0_min,
-            frame_period=10,
-        )
-        f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
-        f0 = signal.medfilt(f0, 3)
-        f0 *= pow(2, f0_up_key / 12)
-        # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-        tf0 = self.sr // self.window  # number of f0 frames per second
-        if inp_f0 is not None:
-            delta_t = np.round(
-                (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
-            ).astype("int16")
-            replace_f0 = np.interp(
-                list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
-            )
-            shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
-            f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
-        # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-        f0bak = f0.copy()
-        f0_mel = 1127 * np.log(1 + f0 / 700)
-        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-            f0_mel_max - f0_mel_min
-        ) + 1
-        f0_mel[f0_mel <= 1] = 1
-        f0_mel[f0_mel > 255] = 255
-        # np.int was removed in NumPy 1.24; use a concrete integer dtype
-        f0_coarse = np.rint(f0_mel).astype(np.int64)
-        return f0_coarse, f0bak  # 1-0
-
-    def infer(self, feats: torch.Tensor) -> np.ndarray:
-        """
-        Run one inference pass over a block of features.
-        """
-        audio = feats.clone().cpu().numpy()
-        assert feats.dim() == 1, feats.dim()
-        feats = feats.view(1, -1)
-        padding_mask = torch.BoolTensor(feats.shape).fill_(False)
-        if Config.is_half:
-            feats = feats.half()
-        else:
-            feats = feats.float()
-        inputs = {
-            "source": feats.to(device),
-            "padding_mask": padding_mask.to(device),
-            "output_layer": 9 if self.version == "v1" else 12,
-        }
-        torch.cuda.synchronize()
-        with
torch.no_grad(): - logits = self.model.extract_features(**inputs) - feats = ( - self.model.final_proj(logits[0]) if self.version == "v1" else logits[0] - ) - - ####索引优化 - try: - if ( - hasattr(self, "index") - and hasattr(self, "big_npy") - and self.index_rate != 0 - ): - npy = feats[0].cpu().numpy().astype("float32") - score, ix = self.index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - if Config.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate - + (1 - self.index_rate) * feats - ) - else: - print("index search FAIL or disabled") - except: - traceback.print_exc() - print("index search FAIL") - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - torch.cuda.synchronize() - print(feats.shape) - if self.if_f0 == 1: - pitch, pitchf = self.get_f0(audio, self.f0_up_key) - p_len = min(feats.shape[1], 13000, pitch.shape[0]) # 太大了爆显存 - else: - pitch, pitchf = None, None - p_len = min(feats.shape[1], 13000) # 太大了爆显存 - torch.cuda.synchronize() - # print(feats.shape,pitch.shape) - feats = feats[:, :p_len, :] - if self.if_f0 == 1: - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.LongTensor(pitch).unsqueeze(0).to(device) - pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device) - p_len = torch.LongTensor([p_len]).to(device) - ii = 0 # sid - sid = torch.LongTensor([ii]).to(device) - with torch.no_grad(): - if self.if_f0 == 1: - infered_audio = ( - self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] - .data.cpu() - .float() - ) - else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid)[0][0, 0].data.cpu().float() - ) - torch.cuda.synchronize() - return infered_audio - - -class GUIConfig: - def __init__(self) -> None: - self.hubert_path: str = "" - self.pth_path: str = "" - self.index_path: str = "" - self.npy_path: str = "" - self.pitch: int = 12 - self.samplerate: int = 44100 - self.block_time: float = 1.0 # s - self.buffer_num: int = 1 - self.threhold: int = -30 - self.crossfade_time: float = 0.08 - self.extra_time: float = 0.04 - self.I_noise_reduce = False - self.O_noise_reduce = False - self.index_rate = 0.3 - - -class GUI: - def __init__(self) -> None: - self.config = GUIConfig() - self.flag_vc = False - - self.launcher() - - def load(self): - ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) = self.get_devices() - try: - with open("values1.json", "r") as j: - data = json.load(j) - except: - with open("values1.json", "w") as j: - data = { - "pth_path": "", - "index_path": "", - "sg_input_device": input_devices[ - input_devices_indices.index(sd.default.device[0]) - ], - "sg_output_device": output_devices[ - output_devices_indices.index(sd.default.device[1]) - ], - "threhold": "-45", - "pitch": "0", - "index_rate": "0", - "block_time": "1", - "crossfade_length": "0.04", - "extra_time": "1", - } - return data - - def launcher(self): - data = self.load() - sg.theme("LightBlue3") - input_devices, output_devices, _, _ = self.get_devices() - layout = [ - [ - sg.Frame( - title=i18n("Load model"), - layout=[ - [ - sg.Input( - default_text="hubert_base.pt", - key="hubert_path", - disabled=True, - ), - sg.FileBrowse( - i18n("Hubert Model"), - initial_folder=os.path.join(os.getcwd()), - file_types=(("pt files", "*.pt"),), - ), - ], - [ - sg.Input( - default_text=data.get("pth_path", ""), - key="pth_path", - ), - 
sg.FileBrowse( - i18n("Select the .pth file"), - initial_folder=os.path.join(os.getcwd(), "weights"), - file_types=(("weight files", "*.pth"),), - ), - ], - [ - sg.Input( - default_text=data.get("index_path", ""), - key="index_path", - ), - sg.FileBrowse( - i18n("Select the .index file"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("index files", "*.index"),), - ), - ], - [ - sg.Input( - default_text="你不需要填写这个You don't need write this.", - key="npy_path", - disabled=True, - ), - sg.FileBrowse( - i18n("Select the .npy file"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("feature files", "*.npy"),), - ), - ], - ], - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("Input device")), - sg.Combo( - input_devices, - key="sg_input_device", - default_value=data.get("sg_input_device", ""), - ), - ], - [ - sg.Text(i18n("Output device")), - sg.Combo( - output_devices, - key="sg_output_device", - default_value=data.get("sg_output_device", ""), - ), - ], - ], - title=i18n("Audio device (please use the same type of driver)"), - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("Response threshold")), - sg.Slider( - range=(-60, 0), - key="threhold", - resolution=1, - orientation="h", - default_value=data.get("threhold", ""), - ), - ], - [ - sg.Text(i18n("Pitch settings")), - sg.Slider( - range=(-24, 24), - key="pitch", - resolution=1, - orientation="h", - default_value=data.get("pitch", ""), - ), - ], - [ - sg.Text(i18n("Index Rate")), - sg.Slider( - range=(0.0, 1.0), - key="index_rate", - resolution=0.01, - orientation="h", - default_value=data.get("index_rate", ""), - ), - ], - ], - title=i18n("General settings"), - ), - sg.Frame( - layout=[ - [ - sg.Text(i18n("Sample length")), - sg.Slider( - range=(0.1, 3.0), - key="block_time", - resolution=0.1, - orientation="h", - default_value=data.get("block_time", ""), - ), - ], - [ - sg.Text(i18n("Fade length")), - sg.Slider( - range=(0.01, 0.15), - key="crossfade_length", - resolution=0.01, - orientation="h", - default_value=data.get("crossfade_length", ""), - ), - ], - [ - sg.Text(i18n("Extra推理时长")), - sg.Slider( - range=(0.05, 3.00), - key="extra_time", - resolution=0.01, - orientation="h", - default_value=data.get("extra_time", ""), - ), - ], - [ - sg.Checkbox(i18n("Input noise reduction"), key="I_noise_reduce"), - sg.Checkbox(i18n("Output noise reduction"), key="O_noise_reduce"), - ], - ], - title=i18n("Performance settings"), - ), - ], - [ - sg.Button(i18n("开始音频Convert"), key="start_vc"), - sg.Button(i18n("停止音频Convert"), key="stop_vc"), - sg.Text(i18n("Inference time (ms):")), - sg.Text("0", key="infer_time"), - ], - ] - self.window = sg.Window("RVC - GUI", layout=layout) - self.event_handler() - - def event_handler(self): - while True: - event, values = self.window.read() - if event == sg.WINDOW_CLOSED: - self.flag_vc = False - exit() - if event == "start_vc" and self.flag_vc == False: - if self.set_values(values) == True: - print("using_cuda:" + str(torch.cuda.is_available())) - self.start_vc() - settings = { - "pth_path": values["pth_path"], - "index_path": values["index_path"], - "sg_input_device": values["sg_input_device"], - "sg_output_device": values["sg_output_device"], - "threhold": values["threhold"], - "pitch": values["pitch"], - "index_rate": values["index_rate"], - "block_time": values["block_time"], - "crossfade_length": values["crossfade_length"], - "extra_time": values["extra_time"], - } - with open("values1.json", "w") as j: - json.dump(settings, j) - if event == "stop_vc" and 
self.flag_vc == True: - self.flag_vc = False - - def set_values(self, values): - if len(values["pth_path"].strip()) == 0: - sg.popup(i18n("Select the pth file")) - return False - if len(values["index_path"].strip()) == 0: - sg.popup(i18n("Select the index file")) - return False - pattern = re.compile("[^\x00-\x7F]+") - if pattern.findall(values["hubert_path"]): - sg.popup(i18n("The hubert model path must not contain Chinese characters")) - return False - if pattern.findall(values["pth_path"]): - sg.popup(i18n("The pth file path must not contain Chinese characters.")) - return False - if pattern.findall(values["index_path"]): - sg.popup(i18n("The index file path must not contain Chinese characters.")) - return False - self.set_devices(values["sg_input_device"], values["sg_output_device"]) - self.config.hubert_path = os.path.join(current_dir, "hubert_base.pt") - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - self.config.npy_path = values["npy_path"] - self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.index_rate = values["index_rate"] - return True - - def start_vc(self): - torch.cuda.empty_cache() - self.flag_vc = True - self.block_frame = int(self.config.block_time * self.config.samplerate) - self.crossfade_frame = int(self.config.crossfade_time * self.config.samplerate) - self.sola_search_frame = int(0.012 * self.config.samplerate) - self.delay_frame = int(0.01 * self.config.samplerate) # 往前预留0.02s - self.extra_frame = int(self.config.extra_time * self.config.samplerate) - self.rvc = None - self.rvc = RVC( - self.config.pitch, - self.config.hubert_path, - self.config.pth_path, - self.config.index_path, - self.config.npy_path, - self.config.index_rate, - ) - self.input_wav: np.ndarray = np.zeros( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame, - dtype="float32", - ) - self.output_wav: torch.Tensor = torch.zeros( - self.block_frame, device=device, dtype=torch.float32 - ) - self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_in_window: torch.Tensor = torch.linspace( - 0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_out_window: torch.Tensor = 1 - self.fade_in_window - self.resampler1 = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ) - self.resampler2 = tat.Resample( - orig_freq=self.rvc.tgt_sr, - new_freq=self.config.samplerate, - dtype=torch.float32, - ) - thread_vc = threading.Thread(target=self.soundinput) - thread_vc.start() - - def soundinput(self): - """ - 接受音频输入 - """ - with sd.Stream( - channels=2, - callback=self.audio_callback, - blocksize=self.block_frame, - samplerate=self.config.samplerate, - dtype="float32", - ): - while self.flag_vc: - time.sleep(self.config.block_time) - print("Audio block passed.") - print("ENDing VC") - - def audio_callback( - self, indata: np.ndarray, outdata: np.ndarray, frames, times, status - ): - """ - 音频处理 - """ - start_time = time.perf_counter() - indata = librosa.to_mono(indata.T) - if self.config.I_noise_reduce: - indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate) - - """noise gate""" - 
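# The block below implements a simple frame-level RMS noise gate. A minimal
# standalone sketch of the same idea, assuming only numpy and librosa (the
# -45 dB threshold here is illustrative, not taken from this file):

import librosa
import numpy as np

def noise_gate(mono: np.ndarray, threshold_db: float = -45.0,
               frame_length: int = 2048, hop_length: int = 1024) -> np.ndarray:
    # Zero every hop whose RMS level (in dB, ref=1.0) falls below the threshold.
    rms = librosa.feature.rms(y=mono, frame_length=frame_length, hop_length=hop_length)
    quiet = librosa.amplitude_to_db(rms, ref=1.0)[0] < threshold_db
    gated = mono.copy()
    for i, is_quiet in enumerate(quiet):
        if is_quiet:
            gated[i * hop_length : (i + 1) * hop_length] = 0.0
    return gated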
frame_length = 2048 - hop_length = 1024 - rms = librosa.feature.rms( - y=indata, frame_length=frame_length, hop_length=hop_length - ) - db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold - # print(rms.shape,db.shape,db) - for i in range(db_threhold.shape[0]): - if db_threhold[i]: - indata[i * hop_length : (i + 1) * hop_length] = 0 - self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata) - - # infer - print("input_wav:" + str(self.input_wav.shape)) - # print('infered_wav:'+str(infer_wav.shape)) - infer_wav: torch.Tensor = self.resampler2( - self.rvc.infer(self.resampler1(torch.from_numpy(self.input_wav))) - )[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].to( - device - ) - print("infer_wav:" + str(infer_wav.shape)) - - # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC - cor_nom = F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame], - self.sola_buffer[None, None, :], - ) - cor_den = torch.sqrt( - F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame] - ** 2, - torch.ones(1, 1, self.crossfade_frame, device=device), - ) - + 1e-8 - ) - sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0]) - print("sola offset: " + str(int(sola_offset))) - - # crossfade - self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame] - self.output_wav[: self.crossfade_frame] *= self.fade_in_window - self.output_wav[: self.crossfade_frame] += self.sola_buffer[:] - if sola_offset < self.sola_search_frame: - self.sola_buffer[:] = ( - infer_wav[ - -self.sola_search_frame - - self.crossfade_frame - + sola_offset : -self.sola_search_frame - + sola_offset - ] - * self.fade_out_window - ) - else: - self.sola_buffer[:] = ( - infer_wav[-self.crossfade_frame :] * self.fade_out_window - ) - - if self.config.O_noise_reduce: - outdata[:] = np.tile( - nr.reduce_noise( - y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate - ), - (2, 1), - ).T - else: - outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy() - total_time = time.perf_counter() - start_time - self.window["infer_time"].update(int(total_time * 1000)) - print("infer time:" + str(total_time)) - - def get_devices(self, update: bool = True): - """获取设备列表""" - if update: - sd._terminate() - sd._initialize() - devices = sd.query_devices() - hostapis = sd.query_hostapis() - for hostapi in hostapis: - for device_idx in hostapi["devices"]: - devices[device_idx]["hostapi_name"] = hostapi["name"] - input_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_output_channels"] > 0 - ] - input_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_output_channels"] > 0 - ] - return ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) - - def set_devices(self, input_device, output_device): - """设置输出设备""" - ( - input_devices, - output_devices, - input_device_indices, - output_device_indices, - ) = self.get_devices() - sd.default.device[0] = input_device_indices[input_devices.index(input_device)] - sd.default.device[1] = output_device_indices[ - output_devices.index(output_device) - ] - print("input device:" + str(sd.default.device[0]) + ":" + str(input_device)) - 
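# get_devices()/set_devices() above pair human-readable device names with
# PortAudio indices. A minimal sketch of that enumeration using sounddevice
# alone (the device choice is illustrative; older sounddevice releases may not
# expose the "index" key, which is why the code above guards for it):

import sounddevice as sd

devices = sd.query_devices()  # one dict per device, with channel counts
inputs = [d for d in devices if d["max_input_channels"] > 0]
outputs = [d for d in devices if d["max_output_channels"] > 0]
if inputs and outputs:
    # sd.default.device is an (input, output) pair of device indices
    sd.default.device = (inputs[0]["index"], outputs[0]["index"])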
print("output device:" + str(sd.default.device[1]) + ":" + str(output_device)) - - -gui = GUI() diff --git a/spaces/RamAnanth1/Youtube-to-HF-Dataset/downloader/downloader.py b/spaces/RamAnanth1/Youtube-to-HF-Dataset/downloader/downloader.py deleted file mode 100644 index 7408e38299c372a1216b95462807927b17ed6ca1..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/Youtube-to-HF-Dataset/downloader/downloader.py +++ /dev/null @@ -1,14 +0,0 @@ -from abc import ABC, abstractmethod - -class Downloader(ABC): - """ - A video downloader from online platforms to a specified format - """ - - @abstractmethod - def __init__(self, download_path): - self.download_path = download_path - - @abstractmethod - def download(self): - pass \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pyparsing/actions.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pyparsing/actions.py deleted file mode 100644 index f72c66e743146c7a5b70a5440e9ab5459f10245b..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pyparsing/actions.py +++ /dev/null @@ -1,207 +0,0 @@ -# actions.py - -from .exceptions import ParseException -from .util import col - - -class OnlyOnce: - """ - Wrapper for parse actions, to ensure they are only called once. - """ - - def __init__(self, method_call): - from .core import _trim_arity - - self.callable = _trim_arity(method_call) - self.called = False - - def __call__(self, s, l, t): - if not self.called: - results = self.callable(s, l, t) - self.called = True - return results - raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") - - def reset(self): - """ - Allow the associated parse action to be called once more. - """ - - self.called = False - - -def match_only_at_col(n): - """ - Helper method for defining parse actions that require matching at - a specific column in the input text. - """ - - def verify_col(strg, locn, toks): - if col(locn, strg) != n: - raise ParseException(strg, locn, "matched token not at column {}".format(n)) - - return verify_col - - -def replace_with(repl_str): - """ - Helper method for common parse actions that simply return - a literal value. Especially useful when used with - :class:`transform_string` (). - - Example:: - - num = Word(nums).set_parse_action(lambda toks: int(toks[0])) - na = one_of("N/A NA").set_parse_action(replace_with(math.nan)) - term = na | num - - term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234] - """ - return lambda s, l, t: [repl_str] - - -def remove_quotes(s, l, t): - """ - Helper parse action for removing quotation marks from parsed - quoted strings. - - Example:: - - # by default, quotation marks are included in parsed results - quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] - - # use remove_quotes to strip quotation marks from parsed results - quoted_string.set_parse_action(remove_quotes) - quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] - """ - return t[0][1:-1] - - -def with_attribute(*args, **attr_dict): - """ - Helper to create a validating parse action to be used with start - tags created with :class:`make_xml_tags` or - :class:`make_html_tags`. Use ``with_attribute`` to qualify - a starting tag with a required attribute value, to avoid false - matches on common tags such as ```` or ``
<DIV>``.
-
-    Call ``with_attribute`` with a series of attribute names and
-    values. Specify the list of filter attributes names and values as:
-
-    - keyword arguments, as in ``(align="right")``, or
-    - as an explicit dict with ``**`` operator, when an attribute
-      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
-    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
-
-    For attribute names with a namespace prefix, you must use the second
-    form. Attribute names are matched insensitive to upper/lower case.
-
-    If just testing for ``class`` (with or without a namespace), use
-    :class:`with_class`.
-
-    To verify that the attribute exists, but without specifying a value,
-    pass ``with_attribute.ANY_VALUE`` as the value.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div type="grid">1 4 0 1 0</div>
-            <div type="graph">1,3 2,3 1,1</div>
-            <div>this has no type</div>
-            </div>
- - ''' - div,div_end = make_html_tags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().set_parse_action(with_attribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attr_dict.items() - attrs = [(k, v) for k, v in attrs] - - def pa(s, l, tokens): - for attrName, attrValue in attrs: - if attrName not in tokens: - raise ParseException(s, l, "no matching attribute " + attrName) - if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException( - s, - l, - "attribute {!r} has value {!r}, must be {!r}".format( - attrName, tokens[attrName], attrValue - ), - ) - - return pa - - -with_attribute.ANY_VALUE = object() - - -def with_class(classname, namespace=""): - """ - Simplified version of :class:`with_attribute` when - matching on a div class - made difficult because ``class`` is - a reserved word in Python. - - Example:: - - html = ''' -
<div>
-            Some text
-            <div class="grid">1 4 0 1 0</div>
-            <div class="graph">1,3 2,3 1,1</div>
-            <div>this &lt;div&gt; has no class</div>
-            </div>
- - ''' - div,div_end = make_html_tags("div") - div_grid = div().set_parse_action(with_class("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "{}:class".format(namespace) if namespace else "class" - return with_attribute(**{classattr: classname}) - - -# pre-PEP8 compatibility symbols -replaceWith = replace_with -removeQuotes = remove_quotes -withAttribute = with_attribute -withClass = with_class -matchOnlyAtCol = match_only_at_col diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/errors.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/errors.py deleted file mode 100644 index 0bcbe53ef59373c608e62ea285536f8b22b47ecb..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/errors.py +++ /dev/null @@ -1,34 +0,0 @@ -class ConsoleError(Exception): - """An error in console operation.""" - - -class StyleError(Exception): - """An error in styles.""" - - -class StyleSyntaxError(ConsoleError): - """Style was badly formatted.""" - - -class MissingStyle(StyleError): - """No such style.""" - - -class StyleStackError(ConsoleError): - """Style stack is invalid.""" - - -class NotRenderableError(ConsoleError): - """Object is not renderable.""" - - -class MarkupError(ConsoleError): - """Markup was badly formatted.""" - - -class LiveError(ConsoleError): - """Error related to Live display.""" - - -class NoAltScreen(ConsoleError): - """Alt screen mode was required.""" diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/_functools.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/_functools.py deleted file mode 100644 index e7053bac12fdb7b2cc50448f88318cd93f62cc0e..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/_functools.py +++ /dev/null @@ -1,20 +0,0 @@ -import functools - - -# from jaraco.functools 3.5 -def pass_none(func): - """ - Wrap func so it's not called if its first param is None - - >>> print_text = pass_none(print) - >>> print_text('text') - text - >>> print_text(None) - """ - - @functools.wraps(func) - def wrapper(param, *args, **kwargs): - if param is not None: - return func(param, *args, **kwargs) - - return wrapper diff --git a/spaces/RealTimeLiveAIForHealth/WebcamObjectRecognition/utils.py b/spaces/RealTimeLiveAIForHealth/WebcamObjectRecognition/utils.py deleted file mode 100644 index 1144290321351cbf14fb06c8cb2e13782a818e71..0000000000000000000000000000000000000000 --- a/spaces/RealTimeLiveAIForHealth/WebcamObjectRecognition/utils.py +++ /dev/null @@ -1,475 +0,0 @@ -import numpy as np -import cv2 -import pandas as pd -import operator -import matplotlib.pyplot as plt -import os -from sklearn.model_selection import train_test_split -from tensorflow.keras.utils import Sequence -from config import yolo_config - - -def load_weights(model, weights_file_path): - conv_layer_size = 110 - conv_output_idxs = [93, 101, 109] - with open(weights_file_path, 'rb') as file: - major, minor, revision, seen, _ = np.fromfile(file, dtype=np.int32, 
count=5) - - bn_idx = 0 - for conv_idx in range(conv_layer_size): - conv_layer_name = f'conv2d_{conv_idx}' if conv_idx > 0 else 'conv2d' - bn_layer_name = f'batch_normalization_{bn_idx}' if bn_idx > 0 else 'batch_normalization' - - conv_layer = model.get_layer(conv_layer_name) - filters = conv_layer.filters - kernel_size = conv_layer.kernel_size[0] - input_dims = conv_layer.input_shape[-1] - - if conv_idx not in conv_output_idxs: - # darknet bn layer weights: [beta, gamma, mean, variance] - bn_weights = np.fromfile(file, dtype=np.float32, count=4 * filters) - # tf bn layer weights: [gamma, beta, mean, variance] - bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]] - bn_layer = model.get_layer(bn_layer_name) - bn_idx += 1 - else: - conv_bias = np.fromfile(file, dtype=np.float32, count=filters) - - # darknet shape: (out_dim, input_dims, height, width) - # tf shape: (height, width, input_dims, out_dim) - conv_shape = (filters, input_dims, kernel_size, kernel_size) - conv_weights = np.fromfile(file, dtype=np.float32, count=np.product(conv_shape)) - conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0]) - - if conv_idx not in conv_output_idxs: - conv_layer.set_weights([conv_weights]) - bn_layer.set_weights(bn_weights) - else: - conv_layer.set_weights([conv_weights, conv_bias]) - - if len(file.read()) == 0: - print('all weights read') - else: - print(f'failed to read all weights, # of unread weights: {len(file.read())}') - - -def get_detection_data(img, model_outputs, class_names): - """ - - :param img: target raw image - :param model_outputs: outputs from inference_model - :param class_names: list of object class names - :return: - """ - - num_bboxes = model_outputs[-1][0] - boxes, scores, classes = [output[0][:num_bboxes] for output in model_outputs[:-1]] - - h, w = img.shape[:2] - df = pd.DataFrame(boxes, columns=['x1', 'y1', 'x2', 'y2']) - df[['x1', 'x2']] = (df[['x1', 'x2']] * w).astype('int64') - df[['y1', 'y2']] = (df[['y1', 'y2']] * h).astype('int64') - df['class_name'] = np.array(class_names)[classes.astype('int64')] - df['score'] = scores - df['w'] = df['x2'] - df['x1'] - df['h'] = df['y2'] - df['y1'] - - print(f'# of bboxes: {num_bboxes}') - return df - -def read_annotation_lines(annotation_path, test_size=None, random_seed=5566): - with open(annotation_path) as f: - lines = f.readlines() - if test_size: - return train_test_split(lines, test_size=test_size, random_state=random_seed) - else: - return lines - -def draw_bbox(img, detections, cmap, random_color=True, figsize=(10, 10), show_img=True, show_text=True): - """ - Draw bounding boxes on the img. - :param img: BGR img. 
- :param detections: pandas DataFrame containing detections - :param random_color: assign random color for each objects - :param cmap: object colormap - :param plot_img: if plot img with bboxes - :return: None - """ - img = np.array(img) - scale = max(img.shape[0:2]) / 416 - line_width = int(2 * scale) - - for _, row in detections.iterrows(): - x1, y1, x2, y2, cls, score, w, h = row.values - color = list(np.random.random(size=3) * 255) if random_color else cmap[cls] - cv2.rectangle(img, (x1, y1), (x2, y2), color, line_width) - if show_text: - text = f'{cls} {score:.2f}' - font = cv2.FONT_HERSHEY_DUPLEX - font_scale = max(0.3 * scale, 0.3) - thickness = max(int(1 * scale), 1) - (text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=thickness)[0] - cv2.rectangle(img, (x1 - line_width//2, y1 - text_height), (x1 + text_width, y1), color, cv2.FILLED) - cv2.putText(img, text, (x1, y1), font, font_scale, (255, 255, 255), thickness, cv2.LINE_AA) - if show_img: - plt.figure(figsize=figsize) - plt.imshow(img) - plt.show() - return img - - -class DataGenerator(Sequence): - """ - Generates data for Keras - ref: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly - """ - def __init__(self, - annotation_lines, - class_name_path, - folder_path, - max_boxes=100, - shuffle=True): - self.annotation_lines = annotation_lines - self.class_name_path = class_name_path - self.num_classes = len([line.strip() for line in open(class_name_path).readlines()]) - self.num_gpu = yolo_config['num_gpu'] - self.batch_size = yolo_config['batch_size'] * self.num_gpu - self.target_img_size = yolo_config['img_size'] - self.anchors = np.array(yolo_config['anchors']).reshape((9, 2)) - self.shuffle = shuffle - self.indexes = np.arange(len(self.annotation_lines)) - self.folder_path = folder_path - self.max_boxes = max_boxes - self.on_epoch_end() - - def __len__(self): - 'number of batches per epoch' - return int(np.ceil(len(self.annotation_lines) / self.batch_size)) - - def __getitem__(self, index): - 'Generate one batch of data' - - # Generate indexes of the batch - idxs = self.indexes[index * self.batch_size:(index + 1) * self.batch_size] - - # Find list of IDs - lines = [self.annotation_lines[i] for i in idxs] - - # Generate data - X, y_tensor, y_bbox = self.__data_generation(lines) - - return [X, *y_tensor, y_bbox], np.zeros(len(lines)) - - def on_epoch_end(self): - 'Updates indexes after each epoch' - if self.shuffle: - np.random.shuffle(self.indexes) - - def __data_generation(self, annotation_lines): - """ - Generates data containing batch_size samples - :param annotation_lines: - :return: - """ - - X = np.empty((len(annotation_lines), *self.target_img_size), dtype=np.float32) - y_bbox = np.empty((len(annotation_lines), self.max_boxes, 5), dtype=np.float32) # x1y1x2y2 - - for i, line in enumerate(annotation_lines): - img_data, box_data = self.get_data(line) - X[i] = img_data - y_bbox[i] = box_data - - y_tensor, y_true_boxes_xywh = preprocess_true_boxes(y_bbox, self.target_img_size[:2], self.anchors, self.num_classes) - - return X, y_tensor, y_true_boxes_xywh - - def get_data(self, annotation_line): - line = annotation_line.split() - img_path = line[0] - img = cv2.imread(os.path.join(self.folder_path, img_path))[:, :, ::-1] - ih, iw = img.shape[:2] - h, w, c = self.target_img_size - boxes = np.array([np.array(list(map(float, box.split(',')))) for box in line[1:]], dtype=np.float32) # x1y1x2y2 - scale_w, scale_h = w / iw, h / ih - img = cv2.resize(img, (w, h)) - image_data 
= np.array(img) / 255. - - # correct boxes coordinates - box_data = np.zeros((self.max_boxes, 5)) - if len(boxes) > 0: - np.random.shuffle(boxes) - boxes = boxes[:self.max_boxes] - boxes[:, [0, 2]] = boxes[:, [0, 2]] * scale_w # + dx - boxes[:, [1, 3]] = boxes[:, [1, 3]] * scale_h # + dy - box_data[:len(boxes)] = boxes - - return image_data, box_data - - -def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes): - '''Preprocess true boxes to training input format - - Parameters - ---------- - true_boxes: array, shape=(bs, max boxes per img, 5) - Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape. - input_shape: array-like, hw, multiples of 32 - anchors: array, shape=(N, 2), (9, wh) - num_classes: int - - Returns - ------- - y_true: list of array, shape like yolo_outputs, xywh are reletive value - - ''' - - num_stages = 3 # default setting for yolo, tiny yolo will be 2 - anchor_mask = [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - bbox_per_grid = 3 - true_boxes = np.array(true_boxes, dtype='float32') - true_boxes_abs = np.array(true_boxes, dtype='float32') - input_shape = np.array(input_shape, dtype='int32') - true_boxes_xy = (true_boxes_abs[..., 0:2] + true_boxes_abs[..., 2:4]) // 2 # (100, 2) - true_boxes_wh = true_boxes_abs[..., 2:4] - true_boxes_abs[..., 0:2] # (100, 2) - - # Normalize x,y,w, h, relative to img size -> (0~1) - true_boxes[..., 0:2] = true_boxes_xy/input_shape[::-1] # xy - true_boxes[..., 2:4] = true_boxes_wh/input_shape[::-1] # wh - - bs = true_boxes.shape[0] - grid_sizes = [input_shape//{0:8, 1:16, 2:32}[stage] for stage in range(num_stages)] - y_true = [np.zeros((bs, - grid_sizes[s][0], - grid_sizes[s][1], - bbox_per_grid, - 5+num_classes), dtype='float32') - for s in range(num_stages)] - # [(?, 52, 52, 3, 5+num_classes) (?, 26, 26, 3, 5+num_classes) (?, 13, 13, 3, 5+num_classes) ] - y_true_boxes_xywh = np.concatenate((true_boxes_xy, true_boxes_wh), axis=-1) - # Expand dim to apply broadcasting. - anchors = np.expand_dims(anchors, 0) # (1, 9 , 2) - anchor_maxes = anchors / 2. # (1, 9 , 2) - anchor_mins = -anchor_maxes # (1, 9 , 2) - valid_mask = true_boxes_wh[..., 0] > 0 # (1, 100) - - for batch_idx in range(bs): - # Discard zero rows. - wh = true_boxes_wh[batch_idx, valid_mask[batch_idx]] # (# of bbox, 2) - num_boxes = len(wh) - if num_boxes == 0: continue - wh = np.expand_dims(wh, -2) # (# of bbox, 1, 2) - box_maxes = wh / 2. # (# of bbox, 1, 2) - box_mins = -box_maxes # (# of bbox, 1, 2) - - # Compute IoU between each anchors and true boxes for responsibility assignment - intersect_mins = np.maximum(box_mins, anchor_mins) # (# of bbox, 9, 2) - intersect_maxes = np.minimum(box_maxes, anchor_maxes) - intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.) 
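# The intersect_* lines around this point compute the IoU of every ground-truth
# box against every anchor, with both centred at the origin, to decide which
# anchor is responsible for each box. The same computation as a standalone
# sketch (the anchor sizes below are illustrative):

import numpy as np

def wh_iou(wh: np.ndarray, anchors: np.ndarray) -> np.ndarray:
    # wh: (n, 2) box sizes; anchors: (m, 2) anchor sizes; returns (n, m) IoU.
    inter = np.prod(np.minimum(wh[:, None, :], anchors[None, :, :]), axis=-1)
    union = np.prod(wh, axis=-1)[:, None] + np.prod(anchors, axis=-1)[None, :] - inter
    return inter / union

best_anchor = np.argmax(
    wh_iou(np.array([[30.0, 60.0]]), np.array([[10.0, 13.0], [33.0, 23.0], [62.0, 45.0]])),
    axis=-1,
)  # -> array([2]): the 62x45 anchor overlaps a 30x60 box best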
- intersect_area = np.prod(intersect_wh, axis=-1) # (9,) - box_area = wh[..., 0] * wh[..., 1] # (# of bbox, 1) - anchor_area = anchors[..., 0] * anchors[..., 1] # (1, 9) - iou = intersect_area / (box_area + anchor_area - intersect_area) # (# of bbox, 9) - - # Find best anchor for each true box - best_anchors = np.argmax(iou, axis=-1) # (# of bbox,) - for box_idx in range(num_boxes): - best_anchor = best_anchors[box_idx] - for stage in range(num_stages): - if best_anchor in anchor_mask[stage]: - x_offset = true_boxes[batch_idx, box_idx, 0]*grid_sizes[stage][1] - y_offset = true_boxes[batch_idx, box_idx, 1]*grid_sizes[stage][0] - # Grid Index - grid_col = np.floor(x_offset).astype('int32') - grid_row = np.floor(y_offset).astype('int32') - anchor_idx = anchor_mask[stage].index(best_anchor) - class_idx = true_boxes[batch_idx, box_idx, 4].astype('int32') - # y_true[stage][batch_idx, grid_row, grid_col, anchor_idx, 0] = x_offset - grid_col # x - # y_true[stage][batch_idx, grid_row, grid_col, anchor_idx, 1] = y_offset - grid_row # y - # y_true[stage][batch_idx, grid_row, grid_col, anchor_idx, :4] = true_boxes_abs[batch_idx, box_idx, :4] # abs xywh - y_true[stage][batch_idx, grid_row, grid_col, anchor_idx, :2] = true_boxes_xy[batch_idx, box_idx, :] # abs xy - y_true[stage][batch_idx, grid_row, grid_col, anchor_idx, 2:4] = true_boxes_wh[batch_idx, box_idx, :] # abs wh - y_true[stage][batch_idx, grid_row, grid_col, anchor_idx, 4] = 1 # confidence - - y_true[stage][batch_idx, grid_row, grid_col, anchor_idx, 5+class_idx] = 1 # one-hot encoding - # smooth - # onehot = np.zeros(num_classes, dtype=np.float) - # onehot[class_idx] = 1.0 - # uniform_distribution = np.full(num_classes, 1.0 / num_classes) - # delta = 0.01 - # smooth_onehot = onehot * (1 - delta) + delta * uniform_distribution - # y_true[stage][batch_idx, grid_row, grid_col, anchor_idx, 5:] = smooth_onehot - - return y_true, y_true_boxes_xywh - -""" - Calculate the AP given the recall and precision array - 1st) We compute a version of the measured precision/recall curve with - precision monotonically decreasing - 2nd) We compute the AP as the area under this curve by numerical integration. 
-""" -def voc_ap(rec, prec): - """ - --- Official matlab code VOC2012--- - mrec=[0 ; rec ; 1]; - mpre=[0 ; prec ; 0]; - for i=numel(mpre)-1:-1:1 - mpre(i)=max(mpre(i),mpre(i+1)); - end - i=find(mrec(2:end)~=mrec(1:end-1))+1; - ap=sum((mrec(i)-mrec(i-1)).*mpre(i)); - """ - rec.insert(0, 0.0) # insert 0.0 at begining of list - rec.append(1.0) # insert 1.0 at end of list - mrec = rec[:] - prec.insert(0, 0.0) # insert 0.0 at begining of list - prec.append(0.0) # insert 0.0 at end of list - mpre = prec[:] - """ - This part makes the precision monotonically decreasing - (goes from the end to the beginning) - matlab: for i=numel(mpre)-1:-1:1 - mpre(i)=max(mpre(i),mpre(i+1)); - """ - # matlab indexes start in 1 but python in 0, so I have to do: - # range(start=(len(mpre) - 2), end=0, step=-1) - # also the python function range excludes the end, resulting in: - # range(start=(len(mpre) - 2), end=-1, step=-1) - for i in range(len(mpre)-2, -1, -1): - mpre[i] = max(mpre[i], mpre[i+1]) - """ - This part creates a list of indexes where the recall changes - matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1; - """ - i_list = [] - for i in range(1, len(mrec)): - if mrec[i] != mrec[i-1]: - i_list.append(i) # if it was matlab would be i + 1 - """ - The Average Precision (AP) is the area under the curve - (numerical integration) - matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i)); - """ - ap = 0.0 - for i in i_list: - ap += ((mrec[i]-mrec[i-1])*mpre[i]) - return ap, mrec, mpre - -""" - Draw plot using Matplotlib -""" -def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar): - # sort the dictionary by decreasing value, into a list of tuples - sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1)) - print(sorted_dic_by_value) - # unpacking the list of tuples into two lists - sorted_keys, sorted_values = zip(*sorted_dic_by_value) - # - if true_p_bar != "": - """ - Special case to draw in: - - green -> TP: True Positives (object detected and matches ground-truth) - - red -> FP: False Positives (object detected but does not match ground-truth) - - pink -> FN: False Negatives (object not detected but present in the ground-truth) - """ - fp_sorted = [] - tp_sorted = [] - for key in sorted_keys: - fp_sorted.append(dictionary[key] - true_p_bar[key]) - tp_sorted.append(true_p_bar[key]) - plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive') - plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted) - # add legend - plt.legend(loc='lower right') - """ - Write number on side of bar - """ - fig = plt.gcf() # gcf - get current figure - axes = plt.gca() - r = fig.canvas.get_renderer() - for i, val in enumerate(sorted_values): - fp_val = fp_sorted[i] - tp_val = tp_sorted[i] - fp_str_val = " " + str(fp_val) - tp_str_val = fp_str_val + " " + str(tp_val) - # trick to paint multicolor with offset: - # first paint everything and then repaint the first number - t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold') - plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold') - if i == (len(sorted_values)-1): # largest bar - adjust_axes(r, t, fig, axes) - else: - plt.barh(range(n_classes), sorted_values, color=plot_color) - """ - Write number on side of bar - """ - fig = plt.gcf() # gcf - get current figure - axes = plt.gca() - r = fig.canvas.get_renderer() - for i, val in enumerate(sorted_values): - str_val 
= " " + str(val) # add a space before - if val < 1.0: - str_val = " {0:.2f}".format(val) - t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold') - # re-set axes to show number inside the figure - if i == (len(sorted_values)-1): # largest bar - adjust_axes(r, t, fig, axes) - # set window title - fig.canvas.set_window_title(window_title) - # write classes in y axis - tick_font_size = 12 - plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size) - """ - Re-scale height accordingly - """ - init_height = fig.get_figheight() - # comput the matrix height in points and inches - dpi = fig.dpi - height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing) - height_in = height_pt / dpi - # compute the required figure height - top_margin = 0.15 # in percentage of the figure height - bottom_margin = 0.05 # in percentage of the figure height - figure_height = height_in / (1 - top_margin - bottom_margin) - # set new height - if figure_height > init_height: - fig.set_figheight(figure_height) - - # set plot title - plt.title(plot_title, fontsize=14) - # set axis titles - # plt.xlabel('classes') - plt.xlabel(x_label, fontsize='large') - # adjust size of window - fig.tight_layout() - # save the plot - fig.savefig(output_path) - # show image - # if to_show: - plt.show() - # close the plot - # plt.close() - -""" - Plot - adjust axes -""" -def adjust_axes(r, t, fig, axes): - # get text width for re-scaling - bb = t.get_window_extent(renderer=r) - text_width_inches = bb.width / fig.dpi - # get axis width in inches - current_fig_width = fig.get_figwidth() - new_fig_width = current_fig_width + text_width_inches - propotion = new_fig_width / current_fig_width - # get axis limit - x_lim = axes.get_xlim() - axes.set_xlim([x_lim[0], x_lim[1]*propotion]) - - -def read_txt_to_list(path): - # open txt file lines to a list - with open(path) as f: - content = f.readlines() - # remove whitespace characters like `\n` at the end of each line - content = [x.strip() for x in content] - return content \ No newline at end of file diff --git a/spaces/Rebskii/rvc-models-test/infer_pack/models.py b/spaces/Rebskii/rvc-models-test/infer_pack/models.py deleted file mode 100644 index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000 --- a/spaces/Rebskii/rvc-models-test/infer_pack/models.py +++ /dev/null @@ -1,982 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = 
attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - 
dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > 
self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - 
upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - 
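# GeneratorNSF above excites the decoder with a harmonic sine source built from
# F0 (see SineGen): instantaneous frequency is integrated into phase by a
# cumulative sum, then turned into a sine of amplitude sine_amp. A numpy sketch
# of that core idea (the sample rate and upsampling factor are illustrative,
# not taken from this file):

import numpy as np

sr, upp = 40000, 400            # sample rate and total upsampling factor
f0 = np.full(100, 220.0)        # 100 frames of a steady 220 Hz tone
f0_up = np.repeat(f0, upp)      # frame-rate F0 -> sample-rate F0
phase = 2.0 * np.pi * np.cumsum(f0_up / sr)   # integrate frequency into phase
sine = 0.1 * np.sin(phase)      # sine_amp = 0.1, matching SineGen's default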
self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - 
f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - 
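The infer() methods above all follow the same VITS-style path: sample the text-encoder prior at a reduced noise scale, invert the flow, and decode. A toy sketch with stand-in tensors (shapes illustrative):

```python
# Toy sketch of the shared inference path; modules and shapes are stand-ins.
import torch

torch.manual_seed(0)
b, c, t = 1, 192, 50
m_p, logs_p = torch.zeros(b, c, t), torch.zeros(b, c, t)  # prior from enc_p
x_mask = torch.ones(b, 1, t)

temperature = 0.66666  # the noise scale hard-coded in infer()
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * temperature) * x_mask
# real model: z = flow(z_p, x_mask, g=g, reverse=True); audio = dec(z * x_mask, ...)
print(round(z_p.std().item(), 2))  # ~0.67
```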
self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y_lengths, ds - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - z_slice, ids_slice = commons.rand_slice_segments( - x, y_lengths, self.segment_size - ) - - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice - - def infer( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o, o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, 
padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/Ritori/TTS_Yui/hifi-gan/meldataset.py b/spaces/Ritori/TTS_Yui/hifi-gan/meldataset.py deleted file mode 100644 index 44b0bf45aaeaa88896bd6d64e0821dfc5399f5bd..0000000000000000000000000000000000000000 --- a/spaces/Ritori/TTS_Yui/hifi-gan/meldataset.py +++ /dev/null @@ -1,168 +0,0 @@ -import math -import os -import random -import torch -import torch.utils.data -import numpy as np -from librosa.util import normalize -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def load_wav(full_path): - sampling_rate, data = read(full_path) - return data, sampling_rate - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - if fmax not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - - spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec) - spec = spectral_normalize_torch(spec) - - return spec - - -def get_dataset_filelist(a): - with open(a.input_training_file, 'r', encoding='utf-8') as fi: - training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0]) - for x in fi.read().split('\n') if len(x) > 0] - - with open(a.input_validation_file, 'r', encoding='utf-8') as fi: - validation_files = [os.path.join(a.input_wavs_dir, x.split('|')[0]) - for x in fi.read().split('\n') if len(x) > 0] - return training_files, validation_files - - -class MelDataset(torch.utils.data.Dataset): - def __init__(self, training_files, segment_size, n_fft, num_mels, - hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1, - device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None): - self.audio_files = training_files - 
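DiscriminatorP's forward() above relies on one small trick: reflect-pad the waveform to a multiple of the period, then view it as 2-D so each column holds samples exactly one period apart. A standalone sketch (period and length illustrative):

```python
# Standalone sketch of DiscriminatorP's 1d -> 2d reshape.
import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 22050)  # (batch, channels, samples)
period = 11
b, c, t = x.shape
if t % period != 0:           # pad so t becomes a multiple of the period
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")
    t = t + n_pad
x2d = x.view(b, c, t // period, period)
print(x2d.shape)  # torch.Size([1, 1, 2005, 11])
```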
random.seed(1234) - if shuffle: - random.shuffle(self.audio_files) - self.segment_size = segment_size - self.sampling_rate = sampling_rate - self.split = split - self.n_fft = n_fft - self.num_mels = num_mels - self.hop_size = hop_size - self.win_size = win_size - self.fmin = fmin - self.fmax = fmax - self.fmax_loss = fmax_loss - self.cached_wav = None - self.n_cache_reuse = n_cache_reuse - self._cache_ref_count = 0 - self.device = device - self.fine_tuning = fine_tuning - self.base_mels_path = base_mels_path - - def __getitem__(self, index): - filename = self.audio_files[index] - if self._cache_ref_count == 0: - audio, sampling_rate = load_wav(filename) - audio = audio / MAX_WAV_VALUE - if not self.fine_tuning: - audio = normalize(audio) * 0.95 - self.cached_wav = audio - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - self._cache_ref_count = self.n_cache_reuse - else: - audio = self.cached_wav - self._cache_ref_count -= 1 - - audio = torch.FloatTensor(audio) - audio = audio.unsqueeze(0) - - if not self.fine_tuning: - if self.split: - if audio.size(1) >= self.segment_size: - max_audio_start = audio.size(1) - self.segment_size - audio_start = random.randint(0, max_audio_start) - audio = audio[:, audio_start:audio_start+self.segment_size] - else: - audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant') - - mel = mel_spectrogram(audio, self.n_fft, self.num_mels, - self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax, - center=False) - else: - mel = np.load( - os.path.join(self.base_mels_path, os.path.splitext(filename)[0] + '.npy')) - mel = torch.from_numpy(mel) - - if len(mel.shape) < 3: - mel = mel.unsqueeze(0) - - if self.split: - frames_per_seg = math.ceil(self.segment_size / self.hop_size) - - if audio.size(1) >= self.segment_size: - mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) - mel = mel[:, :, mel_start:mel_start + frames_per_seg] - audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size] - else: - mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant') - audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant') - - mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels, - self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss, - center=False) - - return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) - - def __len__(self): - return len(self.audio_files) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/necks/pafpn.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/necks/pafpn.py deleted file mode 100644 index d7c0b50f29e882aacb5158b33ead3d4566d0ce0b..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/necks/pafpn.py +++ /dev/null @@ -1,142 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import auto_fp16 - -from ..builder import NECKS -from .fpn import FPN - - -@NECKS.register_module() -class PAFPN(FPN): - """Path Aggregation Network for Instance Segmentation. - - This is an implementation of the `PAFPN in Path Aggregation Network - `_. - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. 
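The cropping logic in MelDataset.__getitem__ above keeps mel frames and audio samples aligned: a crop of frames_per_seg mel frames must map to exactly segment_size samples through hop_size. A small sketch of that bookkeeping (values illustrative):

```python
# Sketch of the frame/sample alignment in MelDataset.__getitem__.
import math
import random

segment_size, hop_size = 8192, 256
frames_per_seg = math.ceil(segment_size / hop_size)  # 32 frames
mel_frames = 500                                     # frames in the full mel
mel_start = random.randint(0, mel_frames - frames_per_seg - 1)
audio_start = mel_start * hop_size                   # aligned sample offset
print(frames_per_seg, audio_start, audio_start + frames_per_seg * hop_size)
```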
- start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool): Whether to add conv layers on top of the - original feature maps. Default: False. - extra_convs_on_inputs (bool): Whether to apply extra conv on - the original feature from the backbone. Default: False. - relu_before_extra_convs (bool): Whether to apply relu before the extra - conv. Default: False. - no_norm_on_lateral (bool): Whether to apply norm on lateral. - Default: False. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - act_cfg (str): Config dict for activation layer in ConvModule. - Default: None. - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=0, - end_level=-1, - add_extra_convs=False, - extra_convs_on_inputs=True, - relu_before_extra_convs=False, - no_norm_on_lateral=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None): - super(PAFPN, - self).__init__(in_channels, out_channels, num_outs, start_level, - end_level, add_extra_convs, extra_convs_on_inputs, - relu_before_extra_convs, no_norm_on_lateral, - conv_cfg, norm_cfg, act_cfg) - # add extra bottom up pathway - self.downsample_convs = nn.ModuleList() - self.pafpn_convs = nn.ModuleList() - for i in range(self.start_level + 1, self.backbone_end_level): - d_conv = ConvModule( - out_channels, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - pafpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - self.downsample_convs.append(d_conv) - self.pafpn_convs.append(pafpn_conv) - - @auto_fp16() - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.in_channels) - - # build laterals - laterals = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - - # build top-down path - used_backbone_levels = len(laterals) - for i in range(used_backbone_levels - 1, 0, -1): - prev_shape = laterals[i - 1].shape[2:] - laterals[i - 1] += F.interpolate( - laterals[i], size=prev_shape, mode='nearest') - - # build outputs - # part 1: from original levels - inter_outs = [ - self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) - ] - - # part 2: add bottom-up path - for i in range(0, used_backbone_levels - 1): - inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i]) - - outs = [] - outs.append(inter_outs[0]) - outs.extend([ - self.pafpn_convs[i - 1](inter_outs[i]) - for i in range(1, used_backbone_levels) - ]) - - # part 3: add extra levels - if self.num_outs > len(outs): - # use max pool to get more levels on top of outputs - # (e.g., Faster R-CNN, Mask R-CNN) - if not self.add_extra_convs: - for i in range(self.num_outs - used_backbone_levels): - outs.append(F.max_pool2d(outs[-1], 1, stride=2)) - # add conv layers on top of original feature maps (RetinaNet) - else: - if self.add_extra_convs == 'on_input': - orig = inputs[self.backbone_end_level - 1] - outs.append(self.fpn_convs[used_backbone_levels](orig)) - elif self.add_extra_convs == 'on_lateral': - outs.append(self.fpn_convs[used_backbone_levels]( - laterals[-1])) - elif self.add_extra_convs == 'on_output': - 
outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) - else: - raise NotImplementedError - for i in range(used_backbone_levels + 1, self.num_outs): - if self.relu_before_extra_convs: - outs.append(self.fpn_convs[i](F.relu(outs[-1]))) - else: - outs.append(self.fpn_convs[i](outs[-1])) - return tuple(outs) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/segmentors/cascade_encoder_decoder.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/segmentors/cascade_encoder_decoder.py deleted file mode 100644 index 873957d8d6468147c994493d92ff5c1b15bfb703..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/segmentors/cascade_encoder_decoder.py +++ /dev/null @@ -1,98 +0,0 @@ -from torch import nn - -from annotator.uniformer.mmseg.core import add_prefix -from annotator.uniformer.mmseg.ops import resize -from .. import builder -from ..builder import SEGMENTORS -from .encoder_decoder import EncoderDecoder - - -@SEGMENTORS.register_module() -class CascadeEncoderDecoder(EncoderDecoder): - """Cascade Encoder Decoder segmentors. - - CascadeEncoderDecoder almost the same as EncoderDecoder, while decoders of - CascadeEncoderDecoder are cascaded. The output of previous decoder_head - will be the input of next decoder_head. - """ - - def __init__(self, - num_stages, - backbone, - decode_head, - neck=None, - auxiliary_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None): - self.num_stages = num_stages - super(CascadeEncoderDecoder, self).__init__( - backbone=backbone, - decode_head=decode_head, - neck=neck, - auxiliary_head=auxiliary_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained) - - def _init_decode_head(self, decode_head): - """Initialize ``decode_head``""" - assert isinstance(decode_head, list) - assert len(decode_head) == self.num_stages - self.decode_head = nn.ModuleList() - for i in range(self.num_stages): - self.decode_head.append(builder.build_head(decode_head[i])) - self.align_corners = self.decode_head[-1].align_corners - self.num_classes = self.decode_head[-1].num_classes - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone and heads. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. 
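The cascade described in the class docstring above is easiest to see with toy modules: each later stage's head consumes the backbone features together with the previous stage's prediction. A sketch, not the mmseg implementation (the 1x1-conv heads here are hypothetical):

```python
# Toy sketch of cascaded decode heads refining a previous prediction.
import torch
import torch.nn as nn

class ToyCascade(nn.Module):
    def __init__(self, num_stages=3, channels=8, num_classes=4):
        super().__init__()
        self.first = nn.Conv2d(channels, num_classes, 1)
        self.refine = nn.ModuleList(
            nn.Conv2d(channels + num_classes, num_classes, 1)
            for _ in range(num_stages - 1))

    def forward(self, feats):
        out = self.first(feats)
        for head in self.refine:                      # refine the previous output
            out = head(torch.cat([feats, out], dim=1))
        return out

print(ToyCascade()(torch.randn(2, 8, 16, 16)).shape)  # torch.Size([2, 4, 16, 16])
```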
- """ - self.backbone.init_weights(pretrained=pretrained) - for i in range(self.num_stages): - self.decode_head[i].init_weights() - if self.with_auxiliary_head: - if isinstance(self.auxiliary_head, nn.ModuleList): - for aux_head in self.auxiliary_head: - aux_head.init_weights() - else: - self.auxiliary_head.init_weights() - - def encode_decode(self, img, img_metas): - """Encode images with backbone and decode into a semantic segmentation - map of the same size as input.""" - x = self.extract_feat(img) - out = self.decode_head[0].forward_test(x, img_metas, self.test_cfg) - for i in range(1, self.num_stages): - out = self.decode_head[i].forward_test(x, out, img_metas, - self.test_cfg) - out = resize( - input=out, - size=img.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - return out - - def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): - """Run forward function and calculate loss for decode head in - training.""" - losses = dict() - - loss_decode = self.decode_head[0].forward_train( - x, img_metas, gt_semantic_seg, self.train_cfg) - - losses.update(add_prefix(loss_decode, 'decode_0')) - - for i in range(1, self.num_stages): - # forward test again, maybe unnecessary for most methods. - prev_outputs = self.decode_head[i - 1].forward_test( - x, img_metas, self.test_cfg) - loss_decode = self.decode_head[i].forward_train( - x, prev_outputs, img_metas, gt_semantic_seg, self.train_cfg) - losses.update(add_prefix(loss_decode, f'decode_{i}')) - - return losses diff --git a/spaces/Rongjiehuang/ProDiff/data_gen/tts/data_gen_utils.py b/spaces/Rongjiehuang/ProDiff/data_gen/tts/data_gen_utils.py deleted file mode 100644 index 0b6bf10862cf3f9a8b2aee560ae5d44eabbf00bc..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/ProDiff/data_gen/tts/data_gen_utils.py +++ /dev/null @@ -1,352 +0,0 @@ -import warnings - -warnings.filterwarnings("ignore") - -# import parselmouth -import os -import torch -from skimage.transform import resize -from utils.text_encoder import TokenTextEncoder -from utils.pitch_utils import f0_to_coarse -import struct -import webrtcvad -from scipy.ndimage.morphology import binary_dilation -import librosa -import numpy as np -from utils import audio -import pyloudnorm as pyln -import re -import json -from collections import OrderedDict - -PUNCS = '!,.?;:' - -int16_max = (2 ** 15) - 1 - - -def trim_long_silences(path, sr=None, return_raw_wav=False, norm=True, vad_max_silence_length=12): - """ - Ensures that segments without voice in the waveform remain no longer than a - threshold determined by the VAD parameters in params.py. - :param wav: the raw waveform as a numpy array of floats - :param vad_max_silence_length: Maximum number of consecutive silent frames a segment can have. - :return: the same waveform with silences trimmed away (length <= original wav length) - """ - - ## Voice Activation Detection - # Window size of the VAD. Must be either 10, 20 or 30 milliseconds. - # This sets the granularity of the VAD. Should not need to be changed. 
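To make the VAD framing below concrete: webrtcvad accepts only 10, 20, or 30 ms windows of 16-bit mono PCM, so the sample and byte counts per window follow directly from the sampling rate. A quick sketch of the arithmetic:

```python
# Quick check of the VAD framing used below, assuming 16 kHz audio.
vad_window_length = 30                                     # ms: 10, 20, or 30
sampling_rate = 16000
samples_per_window = (vad_window_length * sampling_rate) // 1000
bytes_per_window = samples_per_window * 2                  # int16 = 2 bytes
print(samples_per_window, bytes_per_window)                # 480 960
```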
- sampling_rate = 16000 - wav_raw, sr = librosa.core.load(path, sr=sr) - - if norm: - meter = pyln.Meter(sr) # create BS.1770 meter - loudness = meter.integrated_loudness(wav_raw) - wav_raw = pyln.normalize.loudness(wav_raw, loudness, -20.0) - if np.abs(wav_raw).max() > 1.0: - wav_raw = wav_raw / np.abs(wav_raw).max() - - wav = librosa.resample(wav_raw, sr, sampling_rate, res_type='kaiser_best') - - vad_window_length = 30 # In milliseconds - # Number of frames to average together when performing the moving average smoothing. - # The larger this value, the larger the VAD variations must be to not get smoothed out. - vad_moving_average_width = 8 - - # Compute the voice detection window size - samples_per_window = (vad_window_length * sampling_rate) // 1000 - - # Trim the end of the audio to have a multiple of the window size - wav = wav[:len(wav) - (len(wav) % samples_per_window)] - - # Convert the float waveform to 16-bit mono PCM - pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16)) - - # Perform voice activation detection - voice_flags = [] - vad = webrtcvad.Vad(mode=3) - for window_start in range(0, len(wav), samples_per_window): - window_end = window_start + samples_per_window - voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2], - sample_rate=sampling_rate)) - voice_flags = np.array(voice_flags) - - # Smooth the voice detection with a moving average - def moving_average(array, width): - array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2))) - ret = np.cumsum(array_padded, dtype=float) - ret[width:] = ret[width:] - ret[:-width] - return ret[width - 1:] / width - - audio_mask = moving_average(voice_flags, vad_moving_average_width) - audio_mask = np.round(audio_mask).astype(np.bool) - - # Dilate the voiced regions - audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1)) - audio_mask = np.repeat(audio_mask, samples_per_window) - audio_mask = resize(audio_mask, (len(wav_raw),)) > 0 - if return_raw_wav: - return wav_raw, audio_mask, sr - return wav_raw[audio_mask], audio_mask, sr - - -def process_utterance(wav_path, - fft_size=1024, - hop_size=256, - win_length=1024, - window="hann", - num_mels=80, - fmin=80, - fmax=7600, - eps=1e-6, - sample_rate=22050, - loud_norm=False, - min_level_db=-100, - return_linear=False, - trim_long_sil=False, vocoder='pwg'): - if isinstance(wav_path, str): - if trim_long_sil: - wav, _, _ = trim_long_silences(wav_path, sample_rate) - else: - wav, _ = librosa.core.load(wav_path, sr=sample_rate) - else: - wav = wav_path - - if loud_norm: - meter = pyln.Meter(sample_rate) # create BS.1770 meter - loudness = meter.integrated_loudness(wav) - wav = pyln.normalize.loudness(wav, loudness, -22.0) - if np.abs(wav).max() > 1: - wav = wav / np.abs(wav).max() - - # get amplitude spectrogram - x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size, - win_length=win_length, window=window, pad_mode="constant") - spc = np.abs(x_stft) # (n_bins, T) - - # get mel basis - fmin = 0 if fmin == -1 else fmin - fmax = sample_rate / 2 if fmax == -1 else fmax - mel_basis = librosa.filters.mel(sample_rate, fft_size, num_mels, fmin, fmax) - mel = mel_basis @ spc - - if vocoder == 'pwg': - mel = np.log10(np.maximum(eps, mel)) # (n_mel_bins, T) - else: - assert False, f'"{vocoder}" is not in ["pwg"].' 
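The core of process_utterance above is a plain STFT-to-mel pipeline. A minimal sketch, assuming the older positional librosa.filters.mel API this code targets (parameter values illustrative, random noise as a stand-in signal):

```python
# Minimal sketch of the STFT -> mel pipeline used above.
import numpy as np
import librosa

sr, fft_size, hop_size, win_length, num_mels = 22050, 1024, 256, 1024, 80
wav = (np.random.randn(sr) * 0.1).astype(np.float32)  # 1 s stand-in signal

spc = np.abs(librosa.stft(wav, n_fft=fft_size, hop_length=hop_size,
                          win_length=win_length, window="hann",
                          pad_mode="constant"))        # (n_bins, T)
mel_basis = librosa.filters.mel(sr, fft_size, num_mels, 0, sr / 2)
mel = np.log10(np.maximum(1e-6, mel_basis @ spc))      # (num_mels, T)
print(mel.shape)
```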
- - l_pad, r_pad = audio.librosa_pad_lr(wav, fft_size, hop_size, 1) - wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0) - wav = wav[:mel.shape[1] * hop_size] - - if not return_linear: - return wav, mel - else: - spc = audio.amp_to_db(spc) - spc = audio.normalize(spc, {'min_level_db': min_level_db}) - return wav, mel, spc - - -def get_pitch(wav_data, mel, hparams): - """ - - :param wav_data: [T] - :param mel: [T, 80] - :param hparams: - :return: - """ - time_step = hparams['hop_size'] / hparams['audio_sample_rate'] * 1000 - f0_min = 80 - f0_max = 750 - - if hparams['hop_size'] == 128: - pad_size = 4 - elif hparams['hop_size'] == 256: - pad_size = 2 - else: - assert False - - f0 = parselmouth.Sound(wav_data, hparams['audio_sample_rate']).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - lpad = pad_size * 2 - rpad = len(mel) - len(f0) - lpad - f0 = np.pad(f0, [[lpad, rpad]], mode='constant') - # mel and f0 are extracted by 2 different libraries. we should force them to have the same length. - # Attention: we find that new version of some libraries could cause ``rpad'' to be a negetive value... - # Just to be sure, we recommend users to set up the same environments as them in requirements_auto.txt (by Anaconda) - delta_l = len(mel) - len(f0) - assert np.abs(delta_l) <= 8 - if delta_l > 0: - f0 = np.concatenate([f0, [f0[-1]] * delta_l], 0) - f0 = f0[:len(mel)] - pitch_coarse = f0_to_coarse(f0) - return f0, pitch_coarse - - -def remove_empty_lines(text): - """remove empty lines""" - assert (len(text) > 0) - assert (isinstance(text, list)) - text = [t.strip() for t in text] - if "" in text: - text.remove("") - return text - - -class TextGrid(object): - def __init__(self, text): - text = remove_empty_lines(text) - self.text = text - self.line_count = 0 - self._get_type() - self._get_time_intval() - self._get_size() - self.tier_list = [] - self._get_item_list() - - def _extract_pattern(self, pattern, inc): - """ - Parameters - ---------- - pattern : regex to extract pattern - inc : increment of line count after extraction - Returns - ------- - group : extracted info - """ - try: - group = re.match(pattern, self.text[self.line_count]).group(1) - self.line_count += inc - except AttributeError: - raise ValueError("File format error at line %d:%s" % (self.line_count, self.text[self.line_count])) - return group - - def _get_type(self): - self.file_type = self._extract_pattern(r"File type = \"(.*)\"", 2) - - def _get_time_intval(self): - self.xmin = self._extract_pattern(r"xmin = (.*)", 1) - self.xmax = self._extract_pattern(r"xmax = (.*)", 2) - - def _get_size(self): - self.size = int(self._extract_pattern(r"size = (.*)", 2)) - - def _get_item_list(self): - """Only supports IntervalTier currently""" - for itemIdx in range(1, self.size + 1): - tier = OrderedDict() - item_list = [] - tier_idx = self._extract_pattern(r"item \[(.*)\]:", 1) - tier_class = self._extract_pattern(r"class = \"(.*)\"", 1) - if tier_class != "IntervalTier": - raise NotImplementedError("Only IntervalTier class is supported currently") - tier_name = self._extract_pattern(r"name = \"(.*)\"", 1) - tier_xmin = self._extract_pattern(r"xmin = (.*)", 1) - tier_xmax = self._extract_pattern(r"xmax = (.*)", 1) - tier_size = self._extract_pattern(r"intervals: size = (.*)", 1) - for i in range(int(tier_size)): - item = OrderedDict() - item["idx"] = self._extract_pattern(r"intervals \[(.*)\]", 1) - item["xmin"] = 
self._extract_pattern(r"xmin = (.*)", 1) - item["xmax"] = self._extract_pattern(r"xmax = (.*)", 1) - item["text"] = self._extract_pattern(r"text = \"(.*)\"", 1) - item_list.append(item) - tier["idx"] = tier_idx - tier["class"] = tier_class - tier["name"] = tier_name - tier["xmin"] = tier_xmin - tier["xmax"] = tier_xmax - tier["size"] = tier_size - tier["items"] = item_list - self.tier_list.append(tier) - - def toJson(self): - _json = OrderedDict() - _json["file_type"] = self.file_type - _json["xmin"] = self.xmin - _json["xmax"] = self.xmax - _json["size"] = self.size - _json["tiers"] = self.tier_list - return json.dumps(_json, ensure_ascii=False, indent=2) - - -def get_mel2ph(tg_fn, ph, mel, hparams): - ph_list = ph.split(" ") - with open(tg_fn, "r") as f: - tg = f.readlines() - tg = remove_empty_lines(tg) - tg = TextGrid(tg) - tg = json.loads(tg.toJson()) - split = np.ones(len(ph_list) + 1, np.float) * -1 - tg_idx = 0 - ph_idx = 0 - tg_align = [x for x in tg['tiers'][-1]['items']] - tg_align_ = [] - for x in tg_align: - x['xmin'] = float(x['xmin']) - x['xmax'] = float(x['xmax']) - if x['text'] in ['sil', 'sp', '', 'SIL', 'PUNC']: - x['text'] = '' - if len(tg_align_) > 0 and tg_align_[-1]['text'] == '': - tg_align_[-1]['xmax'] = x['xmax'] - continue - tg_align_.append(x) - tg_align = tg_align_ - tg_len = len([x for x in tg_align if x['text'] != '']) - ph_len = len([x for x in ph_list if not is_sil_phoneme(x)]) - assert tg_len == ph_len, (tg_len, ph_len, tg_align, ph_list, tg_fn) - while tg_idx < len(tg_align) or ph_idx < len(ph_list): - if tg_idx == len(tg_align) and is_sil_phoneme(ph_list[ph_idx]): - split[ph_idx] = 1e8 - ph_idx += 1 - continue - x = tg_align[tg_idx] - if x['text'] == '' and ph_idx == len(ph_list): - tg_idx += 1 - continue - assert ph_idx < len(ph_list), (tg_len, ph_len, tg_align, ph_list, tg_fn) - ph = ph_list[ph_idx] - if x['text'] == '' and not is_sil_phoneme(ph): - assert False, (ph_list, tg_align) - if x['text'] != '' and is_sil_phoneme(ph): - ph_idx += 1 - else: - assert (x['text'] == '' and is_sil_phoneme(ph)) \ - or x['text'].lower() == ph.lower() \ - or x['text'].lower() == 'sil', (x['text'], ph) - split[ph_idx] = x['xmin'] - if ph_idx > 0 and split[ph_idx - 1] == -1 and is_sil_phoneme(ph_list[ph_idx - 1]): - split[ph_idx - 1] = split[ph_idx] - ph_idx += 1 - tg_idx += 1 - assert tg_idx == len(tg_align), (tg_idx, [x['text'] for x in tg_align]) - assert ph_idx >= len(ph_list) - 1, (ph_idx, ph_list, len(ph_list), [x['text'] for x in tg_align], tg_fn) - mel2ph = np.zeros([mel.shape[0]], np.int) - split[0] = 0 - split[-1] = 1e8 - for i in range(len(split) - 1): - assert split[i] != -1 and split[i] <= split[i + 1], (split[:-1],) - split = [int(s * hparams['audio_sample_rate'] / hparams['hop_size'] + 0.5) for s in split] - for ph_idx in range(len(ph_list)): - mel2ph[split[ph_idx]:split[ph_idx + 1]] = ph_idx + 1 - mel2ph_torch = torch.from_numpy(mel2ph) - T_t = len(ph_list) - dur = mel2ph_torch.new_zeros([T_t + 1]).scatter_add(0, mel2ph_torch, torch.ones_like(mel2ph_torch)) - dur = dur[1:].numpy() - return mel2ph, dur - - -def build_phone_encoder(data_dir): - phone_list_file = os.path.join(data_dir, 'phone_set.json') - phone_list = json.load(open(phone_list_file)) - return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',') - - -def is_sil_phoneme(p): - return not p[0].isalpha() - - -def build_token_encoder(token_list_file): - token_list = json.load(open(token_list_file)) - return TokenTextEncoder(None, vocab_list=token_list, replace_oov='') diff --git 
a/spaces/RyanX/BookSearch/bert/__init__.py b/spaces/RyanX/BookSearch/bert/__init__.py deleted file mode 100644 index effb57b1e893fc03b3782961deb060749083c696..0000000000000000000000000000000000000000 --- a/spaces/RyanX/BookSearch/bert/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/spaces/SERER/VITS-Umamusume-voice-synthesizer/modules.py b/spaces/SERER/VITS-Umamusume-voice-synthesizer/modules.py deleted file mode 100644 index f5af1fd9a20dc03707889f360a39bb4b784a6df3..0000000000000000000000000000000000000000 --- a/spaces/SERER/VITS-Umamusume-voice-synthesizer/modules.py +++ /dev/null @@ -1,387 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
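The LayerNorm module above normalizes over channels by transposing, since F.layer_norm always normalizes the last dimension. A standalone sketch of the trick (sizes illustrative):

```python
# Sketch of the transpose trick: flip (B, C, T) to (B, T, C), normalize, flip back.
import torch
import torch.nn.functional as F

x = torch.randn(2, 192, 50)                       # (batch, channels, time)
gamma, beta = torch.ones(192), torch.zeros(192)
y = F.layer_norm(x.transpose(1, -1), (192,), gamma, beta, 1e-5).transpose(1, -1)
print(y.shape, bool(y.mean(dim=1).abs().max() < 1e-4))  # per-(b, t) channel mean ~ 0
```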
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) 
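Inside WN's layer loop, continued below, each layer applies the WaveNet-style gated activation that commons.fused_add_tanh_sigmoid_multiply implements: half the conv channels pass through tanh and the other half gate them through a sigmoid. A minimal sketch:

```python
# Minimal sketch of the gated activation applied in each WN layer.
import torch

hidden = 4
x_in = torch.randn(1, 2 * hidden, 10)  # conv output with 2*hidden channels
g_l = torch.zeros_like(x_in)           # conditioning slice (zeros if absent)
in_act = x_in + g_l
acts = torch.tanh(in_act[:, :hidden]) * torch.sigmoid(in_act[:, hidden:])
print(acts.shape)  # torch.Size([1, 4, 10])
```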
- - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
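ResidualCouplingLayer above is an affine coupling transform: x1 is shifted and scaled by statistics predicted from x0, so inversion is exact and the log-determinant is just the sum of the log-scales. A toy sketch with stand-in statistics:

```python
# Toy sketch of the affine coupling; (m, logs) stand in for the WN-predicted stats.
import torch

x0, x1 = torch.randn(1, 2, 5), torch.randn(1, 2, 5)
m, logs = x0 * 0.5, x0 * 0.1           # stand-ins for the predicted stats
y1 = m + x1 * torch.exp(logs)          # forward
x1_rec = (y1 - m) * torch.exp(-logs)   # exact inverse
logdet = torch.sum(logs, [1, 2])
print(bool(torch.allclose(x1, x1_rec)), logdet.shape)  # True torch.Size([1])
```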
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/SQSora/VITS-Umamusume-voice-synthesizer/commons.py b/spaces/SQSora/VITS-Umamusume-voice-synthesizer/commons.py deleted file mode 100644 index 2153153f527d94e2abb641ea00c80b518ff6c5bd..0000000000000000000000000000000000000000 --- a/spaces/SQSora/VITS-Umamusume-voice-synthesizer/commons.py +++ /dev/null @@ -1,97 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path diff --git a/spaces/Sandiago21/text-to-speech-italian/README.md b/spaces/Sandiago21/text-to-speech-italian/README.md deleted file mode 100644 index 
bad406206409615a9f276a1f8b0ad74326b3f37b..0000000000000000000000000000000000000000 --- a/spaces/Sandiago21/text-to-speech-italian/README.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: text-to-speech-italian -app_file: app.py -sdk: gradio -sdk_version: 3.36.0 ---- diff --git a/spaces/ShrapTy/GPT4ALL/app.py b/spaces/ShrapTy/GPT4ALL/app.py deleted file mode 100644 index e4e42e2856cba50c0c6be565aa5e4d5eabe16b82..0000000000000000000000000000000000000000 --- a/spaces/ShrapTy/GPT4ALL/app.py +++ /dev/null @@ -1,140 +0,0 @@ -from __future__ import annotations -from typing import Iterable -import gradio as gr -from gradio.themes.base import Base -from gradio.themes.utils import colors, fonts, sizes - -from llama_cpp import Llama -#from huggingface_hub import hf_hub_download - -#hf_hub_download(repo_id="LLukas22/gpt4all-lora-quantized-ggjt", filename="ggjt-model.bin", local_dir=".") -llm = Llama(model_path="./ggjt-model.bin") - - -ins = '''### Instruction: -{} -### Response: -''' - -theme = gr.themes.Monochrome( - primary_hue="indigo", - secondary_hue="blue", - neutral_hue="slate", - radius_size=gr.themes.sizes.radius_sm, - font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"], -) - - - - -# def generate(instruction): -# response = llm(ins.format(instruction)) -# response = response['choices'][0]['text'] -# result = "" -# for word in response.split(" "): -# result += word + " " -# yield result - -def generate(instruction): - result = "" - for x in llm(ins.format(instruction), stop=['### Instruction:', '### End'], stream=True): - result += x['choices'][0]['text'] - yield result - - -examples = [ - "Today I will be present, I intend to spread positive energy, I will be patient", - "This week I will be mindful and considerate of others", - "I will be well spoken and intentional", -] - -def process_example(args): - for x in generate(args): - pass - return x - -css = ".generating {visibility: hidden}" - -# Based on the gradio theming guide and borrowed from https://huggingface.co/spaces/shivi/dolly-v2-demo -class SeafoamCustom(Base): - def __init__( - self, - *, - primary_hue: colors.Color | str = colors.emerald, - secondary_hue: colors.Color | str = colors.blue, - neutral_hue: colors.Color | str = colors.blue, - spacing_size: sizes.Size | str = sizes.spacing_md, - radius_size: sizes.Size | str = sizes.radius_md, - font: fonts.Font - | str - | Iterable[fonts.Font | str] = ( - fonts.GoogleFont("Quicksand"), - "ui-sans-serif", - "sans-serif", - ), - font_mono: fonts.Font - | str - | Iterable[fonts.Font | str] = ( - fonts.GoogleFont("IBM Plex Mono"), - "ui-monospace", - "monospace", - ), - ): - super().__init__( - primary_hue=primary_hue, - secondary_hue=secondary_hue, - neutral_hue=neutral_hue, - spacing_size=spacing_size, - radius_size=radius_size, - font=font, - font_mono=font_mono, - ) - super().set( - button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)", - button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)", - button_primary_text_color="white", - button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)", - block_shadow="*shadow_drop_lg", - button_shadow="*shadow_drop_lg", - input_background_fill="zinc", - input_border_color="*secondary_300", - input_shadow="*shadow_drop", - input_shadow_focus="*shadow_drop_lg", - ) - - -seafoam = SeafoamCustom() - - -with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo: - with gr.Column(): - gr.Markdown( - """ ## - - 
Type in the box below and click the button to generate answers to your most pressing questions!
-
- """
- )
-
- with gr.Row():
- with gr.Column(scale=3):
- instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")
-
- with gr.Box():
- gr.Markdown("**Answer**")
- output = gr.Markdown(elem_id="q-output")
- submit = gr.Button("Generate", variant="primary")
- gr.Examples(
- examples=examples,
- inputs=[instruction],
- cache_examples=False,
- fn=process_example,
- outputs=[output],
- )
-
-
-
- submit.click(generate, inputs=[instruction], outputs=[output])
- instruction.submit(generate, inputs=[instruction], outputs=[output])
-
-demo.queue(concurrency_count=1).launch(debug=True)
\ No newline at end of file
diff --git a/spaces/Shruhrid/Next_Word_Prediction/final_maybe.py b/spaces/Shruhrid/Next_Word_Prediction/final_maybe.py
deleted file mode 100644
index 6231fbd27ed742cd12f5ce74341c17d737659d33..0000000000000000000000000000000000000000
--- a/spaces/Shruhrid/Next_Word_Prediction/final_maybe.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# -*- coding: utf-8 -*-
-"""final-maybe
-
-Automatically generated by Colaboratory.
-
-Original file is located at
-    https://colab.research.google.com/drive/1UueiutTkBBB9Gb2Brp4mQVUvw7cjXTwo
-"""
-
-import nltk
-
-SOS = "<s> "
-EOS = "</s>"
-UNK = "<UNK>"
-
-"""Add Sentence Tokens:
-
-    To identify the beginning and end of each sentence, add the
-    StartOfSentence and EndOfSentence tokens.
-
-    The argument 'sentences' takes a list of str and 'n' is the order of the model.
-    The function returns the list of processed sentences.
-
-    For bigram models (or greater) both tokens are added; otherwise only one is added.
-"""
-
-def add_sentence_tokens(sentences, n):
-    sos = SOS * (n-1) if n > 1 else SOS
-    return ['{}{} {}'.format(sos, s, EOS) for s in sentences]
-
-"""Replace singletons:
-
-    For the tokens appearing only once in the corpus, replace them with <UNK>.
-
-    The argument 'tokens' is the list of tokens in the corpus.
-    The function returns the list of tokens after replacing each singleton with <UNK>.
-"""
-
-def replace_singletons(tokens):
-    vocab = nltk.FreqDist(tokens)
-    return [token if vocab[token] > 1 else UNK for token in tokens]
-
-"""Preprocess:
-
-    The function takes the argument 'sentences', the list of str to
-    preprocess, and 'n', the order of the model.
-    It adds the above three tokens to the sentences and tokenizes them.
-    The function returns the preprocessed tokens.
-"""
-
-def preprocess(sentences, n):
-    sentences = add_sentence_tokens(sentences, n)
-    tokens = ' '.join(sentences).split(' ')
-    tokens = replace_singletons(tokens)
-    return tokens
-
-
-from itertools import product
-import math
-from pathlib import Path
-
-""" This function loads the training and testing corpus from a directory.
-    The argument 'data_dir' contains the path of the directory, which should contain the files 'train.txt' and 'test.txt'.
-    The function returns the train and test sets as lists of sentences.
-"""
-
-def load_data(data_dir):
-    train_path = data_dir + 'train.txt'
-    test_path = data_dir + 'test.txt'
-
-    with open(train_path, 'r') as f:
-        train = [l.strip() for l in f.readlines()]
-    with open(test_path, 'r') as f:
-        test = [l.strip() for l in f.readlines()]
-    return train, test
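Putting the helpers above together, and assuming the angle-bracket tokens (<s>, </s>, <UNK>) restored in the constants: pad each sentence with n-1 start tokens and one end token, then replace singletons. A small worked example:

```python
# Worked example of the n-gram preprocessing above.
import nltk

SOS, EOS, UNK = "<s> ", "</s>", "<UNK>"
sentences, n = ["the cat sat", "the dog sat"], 2
padded = ['{}{} {}'.format(SOS * (n - 1), s, EOS) for s in sentences]
tokens = ' '.join(padded).split(' ')
vocab = nltk.FreqDist(tokens)
tokens = [t if vocab[t] > 1 else UNK for t in tokens]
print(tokens)  # 'cat' and 'dog' occur once each, so both become <UNK>
```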
- The arguments contains training data (list of strings), n (integer; order of the model), - and an integer used for laplace smoothing. - Further, the model has a method for calculating perplexity. -""" - -class LanguageModel(object): - def __init__(self, train_data, n, laplace=1): - self.n = n - self.laplace = laplace - self.tokens = preprocess(train_data, n) - self.vocab = nltk.FreqDist(self.tokens) - self.model = self._create_model() - self.masks = list(reversed(list(product((0,1), repeat=n)))) - - def _smooth(self): - """ - The n tokens of n-gram in training corpus and first n-1 tokens of each n-gram - results in Laplace smoothenedd probability. - The function returns the smoothened probability mapped to its n-gram. - - """ - vocab_size = len(self.vocab) - - n_grams = nltk.ngrams(self.tokens, self.n) - n_vocab = nltk.FreqDist(n_grams) - - m_grams = nltk.ngrams(self.tokens, self.n-1) - m_vocab = nltk.FreqDist(m_grams) - - def smoothed_count(n_gram, n_count): - m_gram = n_gram[:-1] - m_count = m_vocab[m_gram] - return (n_count + self.laplace) / (m_count + self.laplace * vocab_size) - - return { n_gram: smoothed_count(n_gram, count) for n_gram, count in n_vocab.items() } - - def _create_model(self): - """ - This function creates a probability distribution of the vocabulary of training corpus. - The probabilities in a unigram model are simply relative frequencies of each token over the whole corpus. - Otherwise, the relative frequencies are Laplace-smoothed probabilities. - Function returns a dictionary which maps each n-gram, which is in the form of tuple of strings, to its probabilities (float) - - """ - if self.n == 1: - num_tokens = len(self.tokens) - return { (unigram,): count / num_tokens for unigram, count in self.vocab.items() } - else: - return self._smooth() - - def _convert_oov(self, ngram): - """ - This function handles the words which are encountered in the test and converts the given n-gram to one which is known by the model. - Stop when the model contains an entry for every permutation. - The function returns n-gram with tokens in certain positions such that the model - contains an entry for it. - """ - mask = lambda ngram, bitmask: tuple((token if flag == 1 else "" for token,flag in zip(ngram, bitmask))) - - ngram = (ngram,) if type(ngram) is str else ngram - for possible_known in [mask(ngram, bitmask) for bitmask in self.masks]: - if possible_known in self.model: - return possible_known - - def perplexity(self, test_data): - """ - Perplexity of the model is calculated using the sentences and returns - a float value. - - """ - test_tokens = preprocess(test_data, self.n) - test_ngrams = nltk.ngrams(test_tokens, self.n) - N = len(test_tokens) - - known_ngrams = (self._convert_oov(ngram) for ngram in test_ngrams) - probabilities = [self.model[ngram] for ngram in known_ngrams] - - return math.exp((-1/N) * sum(map(math.log, probabilities))) - - def _best_candidate(self, prev, i, without=[]): - """ - Selects the most probable token depending on the basis of previous - (n-1) tokens. - The function takes the argument of previous (n-1) tokens, and the tokens to - exclude from candidates list. - The function returns the most probable token and its probability. 
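# --- Added illustration (hypothetical data, not from the original file): the
# --- greedy selection that _best_candidate performs, restricted to n-grams
# --- whose first n-1 tokens equal `prev` and filtered through a blacklist.
toy_model = {("i", "love", "nlp"): 0.6, ("i", "love", "cats"): 0.3, ("i", "love", "<UNK>"): 0.1}
prev, blacklist = ("i", "love"), ["<UNK>"]
candidates = sorted(
    ((ng[-1], p) for ng, p in toy_model.items() if ng[:-1] == prev and ng[-1] not in blacklist),
    key=lambda c: c[1],
    reverse=True,
)
print(candidates[0])  # ('nlp', 0.6)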
- - """ - blacklist = [""] + without - candidates = ((ngram[-1],prob) for ngram,prob in self.model.items() if ngram[:-1]==prev) - candidates = filter(lambda candidate: candidate[0] not in blacklist, candidates) - candidates = sorted(candidates, key=lambda candidate: candidate[1], reverse=True) - if len(candidates) == 0: - return ("", 1) - else: - return candidates[0 if prev != () and prev[-1] != "" else i] - -# data_path = '/content/drive/Shareddrives/MathProject22/Dataset/data/' -# train, test = load_data(data_path) - -# #if __name__ == '__main__': -# model_instance= LanguageModel(train[0:100], 3, 0) -# # first number is the n of n gram -# # second number is the coefficient whether laplace used or not - -# print(model_instance.perplexity(test)) - -# prev=('I','love',) -# print(model_instance._best_candidate(prev,1)[0]) -# # `1 is ith best fit as a candidate - -# import pickle -# filename = 'without_laplace.sav' -# pickle.dump(model_instance, open(filename, 'wb')) - -# len(train) \ No newline at end of file diff --git a/spaces/SpacesExamples/Gradio-Docker-Template/Dockerfile b/spaces/SpacesExamples/Gradio-Docker-Template/Dockerfile deleted file mode 100644 index c843fc3ece612cb0b9c53e6d58b485942a9ff19d..0000000000000000000000000000000000000000 --- a/spaces/SpacesExamples/Gradio-Docker-Template/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -FROM python:3.9 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user -# Switch to the "user" user -USER user -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH \ - PYTHONPATH=$HOME/app \ - PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - SYSTEM=spaces - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app - -CMD ["python", "app.py"] \ No newline at end of file diff --git a/spaces/SudhanshuBlaze/text-generation-gpt-neo/app.py b/spaces/SudhanshuBlaze/text-generation-gpt-neo/app.py deleted file mode 100644 index cc7df42cae2f961fc2f87b859e5c2b8d0718fa2d..0000000000000000000000000000000000000000 --- a/spaces/SudhanshuBlaze/text-generation-gpt-neo/app.py +++ /dev/null @@ -1,38 +0,0 @@ -import streamlit as st -from transformers import pipeline - -st.title("AI text-gen Web-app") -st.write("This is a auto-complete/text generation web-app powered by GPT-neo. GPT-Neo 125M is a transformer model designed using EleutherAI's replication of the GPT-3 architecture. 
GPT-Neo refers to the class of models, while 125M represents the number of parameters of this particular pre-trained model.") - -# instantiate the model / download -@st.cache(allow_output_mutation=True) -def load_model(): - generator = pipeline('text-generation', model='EleutherAI/gpt-neo-125M') - return (generator) - -generator=load_model() - -min_length=st.slider( - 'Specify Min length of the text of want to be generated', - 10, 100, 20) -max_length=st.slider( - 'Specify Max length of the text of want to be generated', - 20, 150, 30) - -# create a prompt text for the text generation -prompt_text = st.text_input( - label = "Type some text here and this model will generate more....", - value="We live in a society") - - -if(max_length<=min_length): - st.error("max_length cannot be less than equal to min_length") -else: - with st.spinner("AI is at Work........"): - gpt_text = generator( - prompt_text, - min_length=min_length, - max_length=max_length, - do_sample=True)[0]["generated_text"] - st.success("Successfully generated the below text:") - st.write(gpt_text) \ No newline at end of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/pt_inputhooks/gtk3.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/pt_inputhooks/gtk3.py deleted file mode 100644 index b073bd94d9987612689047d288034e423b897b69..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/pt_inputhooks/gtk3.py +++ /dev/null @@ -1,14 +0,0 @@ -"""prompt_toolkit input hook for GTK 3 -""" - -from gi.repository import Gtk, GLib - - -def _main_quit(*args, **kwargs): - Gtk.main_quit() - return False - - -def inputhook(context): - GLib.io_add_watch(context.fileno(), GLib.PRIORITY_DEFAULT, GLib.IO_IN, _main_quit) - Gtk.main() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/dbapi/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/dbapi/__init__.py deleted file mode 100644 index ea792b49683ed3ebf6dd6b09146029c982716363..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/dbapi/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Optional - -from clickhouse_connect.dbapi.connection import Connection - - -apilevel = '2.0' # PEP 249 DB API level -threadsafety = 2 # PEP 249 Threads may share the module and connections. -paramstyle = 'pyformat' # PEP 249 Python extended format codes, e.g. ...WHERE name=%(name)s - - -class Error(Exception): - pass - - -def connect(host: Optional[str] = None, - database: Optional[str] = None, - username: Optional[str] = '', - password: Optional[str] = '', - port: Optional[int] = None, - **kwargs): - secure = kwargs.pop('secure', False) - return Connection(host=host, - database=database, - username=username, - password=password, - port=port, - secure=secure, - **kwargs) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/base_runner.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/base_runner.py deleted file mode 100644 index 4928db0a73b56fe0218a4bf66ec4ffa082d31ccc..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/base_runner.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy -import logging -import os.path as osp -import warnings -from abc import ABCMeta, abstractmethod - -import torch -from torch.optim import Optimizer - -import annotator.uniformer.mmcv as mmcv -from ..parallel import is_module_wrapper -from .checkpoint import load_checkpoint -from .dist_utils import get_dist_info -from .hooks import HOOKS, Hook -from .log_buffer import LogBuffer -from .priority import Priority, get_priority -from .utils import get_time_str - - -class BaseRunner(metaclass=ABCMeta): - """The base class of Runner, a training helper for PyTorch. - - All subclasses should implement the following APIs: - - - ``run()`` - - ``train()`` - - ``val()`` - - ``save_checkpoint()`` - - Args: - model (:obj:`torch.nn.Module`): The model to be run. - batch_processor (callable): A callable method that process a data - batch. The interface of this method should be - `batch_processor(model, data, train_mode) -> dict` - optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an - optimizer (in most cases) or a dict of optimizers (in models that - requires more than one optimizer, e.g., GAN). - work_dir (str, optional): The working directory to save checkpoints - and logs. Defaults to None. - logger (:obj:`logging.Logger`): Logger used during training. - Defaults to None. (The default value is just for backward - compatibility) - meta (dict | None): A dict records some import information such as - environment info and seed, which will be logged in logger hook. - Defaults to None. - max_epochs (int, optional): Total training epochs. - max_iters (int, optional): Total training iterations. - """ - - def __init__(self, - model, - batch_processor=None, - optimizer=None, - work_dir=None, - logger=None, - meta=None, - max_iters=None, - max_epochs=None): - if batch_processor is not None: - if not callable(batch_processor): - raise TypeError('batch_processor must be callable, ' - f'but got {type(batch_processor)}') - warnings.warn('batch_processor is deprecated, please implement ' - 'train_step() and val_step() in the model instead.') - # raise an error is `batch_processor` is not None and - # `model.train_step()` exists. 
- if is_module_wrapper(model): - _model = model.module - else: - _model = model - if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'): - raise RuntimeError( - 'batch_processor and model.train_step()/model.val_step() ' - 'cannot be both available.') - else: - assert hasattr(model, 'train_step') - - # check the type of `optimizer` - if isinstance(optimizer, dict): - for name, optim in optimizer.items(): - if not isinstance(optim, Optimizer): - raise TypeError( - f'optimizer must be a dict of torch.optim.Optimizers, ' - f'but optimizer["{name}"] is a {type(optim)}') - elif not isinstance(optimizer, Optimizer) and optimizer is not None: - raise TypeError( - f'optimizer must be a torch.optim.Optimizer object ' - f'or dict or None, but got {type(optimizer)}') - - # check the type of `logger` - if not isinstance(logger, logging.Logger): - raise TypeError(f'logger must be a logging.Logger object, ' - f'but got {type(logger)}') - - # check the type of `meta` - if meta is not None and not isinstance(meta, dict): - raise TypeError( - f'meta must be a dict or None, but got {type(meta)}') - - self.model = model - self.batch_processor = batch_processor - self.optimizer = optimizer - self.logger = logger - self.meta = meta - # create work_dir - if mmcv.is_str(work_dir): - self.work_dir = osp.abspath(work_dir) - mmcv.mkdir_or_exist(self.work_dir) - elif work_dir is None: - self.work_dir = None - else: - raise TypeError('"work_dir" must be a str or None') - - # get model name from the model class - if hasattr(self.model, 'module'): - self._model_name = self.model.module.__class__.__name__ - else: - self._model_name = self.model.__class__.__name__ - - self._rank, self._world_size = get_dist_info() - self.timestamp = get_time_str() - self.mode = None - self._hooks = [] - self._epoch = 0 - self._iter = 0 - self._inner_iter = 0 - - if max_epochs is not None and max_iters is not None: - raise ValueError( - 'Only one of `max_epochs` or `max_iters` can be set.') - - self._max_epochs = max_epochs - self._max_iters = max_iters - # TODO: Redesign LogBuffer, it is not flexible and elegant enough - self.log_buffer = LogBuffer() - - @property - def model_name(self): - """str: Name of the model, usually the module class name.""" - return self._model_name - - @property - def rank(self): - """int: Rank of current process. (distributed training)""" - return self._rank - - @property - def world_size(self): - """int: Number of processes participating in the job. - (distributed training)""" - return self._world_size - - @property - def hooks(self): - """list[:obj:`Hook`]: A list of registered hooks.""" - return self._hooks - - @property - def epoch(self): - """int: Current epoch.""" - return self._epoch - - @property - def iter(self): - """int: Current iteration.""" - return self._iter - - @property - def inner_iter(self): - """int: Iteration in an epoch.""" - return self._inner_iter - - @property - def max_epochs(self): - """int: Maximum training epochs.""" - return self._max_epochs - - @property - def max_iters(self): - """int: Maximum training iterations.""" - return self._max_iters - - @abstractmethod - def train(self): - pass - - @abstractmethod - def val(self): - pass - - @abstractmethod - def run(self, data_loaders, workflow, **kwargs): - pass - - @abstractmethod - def save_checkpoint(self, - out_dir, - filename_tmpl, - save_optimizer=True, - meta=None, - create_symlink=True): - pass - - def current_lr(self): - """Get current learning rates. 
- - Returns: - list[float] | dict[str, list[float]]: Current learning rates of all - param groups. If the runner has a dict of optimizers, this - method will return a dict. - """ - if isinstance(self.optimizer, torch.optim.Optimizer): - lr = [group['lr'] for group in self.optimizer.param_groups] - elif isinstance(self.optimizer, dict): - lr = dict() - for name, optim in self.optimizer.items(): - lr[name] = [group['lr'] for group in optim.param_groups] - else: - raise RuntimeError( - 'lr is not applicable because optimizer does not exist.') - return lr - - def current_momentum(self): - """Get current momentums. - - Returns: - list[float] | dict[str, list[float]]: Current momentums of all - param groups. If the runner has a dict of optimizers, this - method will return a dict. - """ - - def _get_momentum(optimizer): - momentums = [] - for group in optimizer.param_groups: - if 'momentum' in group.keys(): - momentums.append(group['momentum']) - elif 'betas' in group.keys(): - momentums.append(group['betas'][0]) - else: - momentums.append(0) - return momentums - - if self.optimizer is None: - raise RuntimeError( - 'momentum is not applicable because optimizer does not exist.') - elif isinstance(self.optimizer, torch.optim.Optimizer): - momentums = _get_momentum(self.optimizer) - elif isinstance(self.optimizer, dict): - momentums = dict() - for name, optim in self.optimizer.items(): - momentums[name] = _get_momentum(optim) - return momentums - - def register_hook(self, hook, priority='NORMAL'): - """Register a hook into the hook list. - - The hook will be inserted into a priority queue, with the specified - priority (See :class:`Priority` for details of priorities). - For hooks with the same priority, they will be triggered in the same - order as they are registered. - - Args: - hook (:obj:`Hook`): The hook to be registered. - priority (int or str or :obj:`Priority`): Hook priority. - Lower value means higher priority. - """ - assert isinstance(hook, Hook) - if hasattr(hook, 'priority'): - raise ValueError('"priority" is a reserved attribute for hooks') - priority = get_priority(priority) - hook.priority = priority - # insert the hook to a sorted list - inserted = False - for i in range(len(self._hooks) - 1, -1, -1): - if priority >= self._hooks[i].priority: - self._hooks.insert(i + 1, hook) - inserted = True - break - if not inserted: - self._hooks.insert(0, hook) - - def register_hook_from_cfg(self, hook_cfg): - """Register a hook from its cfg. - - Args: - hook_cfg (dict): Hook config. It should have at least keys 'type' - and 'priority' indicating its type and priority. - - Notes: - The specific hook class to register should not use 'type' and - 'priority' arguments during initialization. - """ - hook_cfg = hook_cfg.copy() - priority = hook_cfg.pop('priority', 'NORMAL') - hook = mmcv.build_from_cfg(hook_cfg, HOOKS) - self.register_hook(hook, priority=priority) - - def call_hook(self, fn_name): - """Call all hooks. - - Args: - fn_name (str): The function name in each hook to be called, such as - "before_train_epoch". 
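# --- Added sketch (toy classes, not mmcv's real API): the dispatch pattern that
# --- call_hook implements: every registered hook exposes stage methods, and the
# --- runner invokes them by name via getattr.
class ToyHook:
    def before_train_epoch(self, runner):
        print("starting epoch", runner.epoch)

class ToyRunner:
    def __init__(self):
        self.epoch, self._hooks = 0, [ToyHook()]

    def call_hook(self, fn_name):
        for hook in self._hooks:
            getattr(hook, fn_name)(self)

ToyRunner().call_hook("before_train_epoch")  # -> starting epoch 0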
- """ - for hook in self._hooks: - getattr(hook, fn_name)(self) - - def get_hook_info(self): - # Get hooks info in each stage - stage_hook_map = {stage: [] for stage in Hook.stages} - for hook in self.hooks: - try: - priority = Priority(hook.priority).name - except ValueError: - priority = hook.priority - classname = hook.__class__.__name__ - hook_info = f'({priority:<12}) {classname:<35}' - for trigger_stage in hook.get_triggered_stages(): - stage_hook_map[trigger_stage].append(hook_info) - - stage_hook_infos = [] - for stage in Hook.stages: - hook_infos = stage_hook_map[stage] - if len(hook_infos) > 0: - info = f'{stage}:\n' - info += '\n'.join(hook_infos) - info += '\n -------------------- ' - stage_hook_infos.append(info) - return '\n'.join(stage_hook_infos) - - def load_checkpoint(self, - filename, - map_location='cpu', - strict=False, - revise_keys=[(r'^module.', '')]): - return load_checkpoint( - self.model, - filename, - map_location, - strict, - self.logger, - revise_keys=revise_keys) - - def resume(self, - checkpoint, - resume_optimizer=True, - map_location='default'): - if map_location == 'default': - if torch.cuda.is_available(): - device_id = torch.cuda.current_device() - checkpoint = self.load_checkpoint( - checkpoint, - map_location=lambda storage, loc: storage.cuda(device_id)) - else: - checkpoint = self.load_checkpoint(checkpoint) - else: - checkpoint = self.load_checkpoint( - checkpoint, map_location=map_location) - - self._epoch = checkpoint['meta']['epoch'] - self._iter = checkpoint['meta']['iter'] - if self.meta is None: - self.meta = {} - self.meta.setdefault('hook_msgs', {}) - # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages - self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {})) - - # Re-calculate the number of iterations when resuming - # models with different number of GPUs - if 'config' in checkpoint['meta']: - config = mmcv.Config.fromstring( - checkpoint['meta']['config'], file_format='.py') - previous_gpu_ids = config.get('gpu_ids', None) - if previous_gpu_ids and len(previous_gpu_ids) > 0 and len( - previous_gpu_ids) != self.world_size: - self._iter = int(self._iter * len(previous_gpu_ids) / - self.world_size) - self.logger.info('the iteration number is changed due to ' - 'change of GPU number') - - # resume meta information meta - self.meta = checkpoint['meta'] - - if 'optimizer' in checkpoint and resume_optimizer: - if isinstance(self.optimizer, Optimizer): - self.optimizer.load_state_dict(checkpoint['optimizer']) - elif isinstance(self.optimizer, dict): - for k in self.optimizer.keys(): - self.optimizer[k].load_state_dict( - checkpoint['optimizer'][k]) - else: - raise TypeError( - 'Optimizer should be dict or torch.optim.Optimizer ' - f'but got {type(self.optimizer)}') - - self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter) - - def register_lr_hook(self, lr_config): - if lr_config is None: - return - elif isinstance(lr_config, dict): - assert 'policy' in lr_config - policy_type = lr_config.pop('policy') - # If the type of policy is all in lower case, e.g., 'cyclic', - # then its first letter will be capitalized, e.g., to be 'Cyclic'. - # This is for the convenient usage of Lr updater. - # Since this is not applicable for ` - # CosineAnnealingLrUpdater`, - # the string will not be changed if it contains capital letters. 
- if policy_type == policy_type.lower(): - policy_type = policy_type.title() - hook_type = policy_type + 'LrUpdaterHook' - lr_config['type'] = hook_type - hook = mmcv.build_from_cfg(lr_config, HOOKS) - else: - hook = lr_config - self.register_hook(hook, priority='VERY_HIGH') - - def register_momentum_hook(self, momentum_config): - if momentum_config is None: - return - if isinstance(momentum_config, dict): - assert 'policy' in momentum_config - policy_type = momentum_config.pop('policy') - # If the type of policy is all in lower case, e.g., 'cyclic', - # then its first letter will be capitalized, e.g., to be 'Cyclic'. - # This is for the convenient usage of momentum updater. - # Since this is not applicable for - # `CosineAnnealingMomentumUpdater`, - # the string will not be changed if it contains capital letters. - if policy_type == policy_type.lower(): - policy_type = policy_type.title() - hook_type = policy_type + 'MomentumUpdaterHook' - momentum_config['type'] = hook_type - hook = mmcv.build_from_cfg(momentum_config, HOOKS) - else: - hook = momentum_config - self.register_hook(hook, priority='HIGH') - - def register_optimizer_hook(self, optimizer_config): - if optimizer_config is None: - return - if isinstance(optimizer_config, dict): - optimizer_config.setdefault('type', 'OptimizerHook') - hook = mmcv.build_from_cfg(optimizer_config, HOOKS) - else: - hook = optimizer_config - self.register_hook(hook, priority='ABOVE_NORMAL') - - def register_checkpoint_hook(self, checkpoint_config): - if checkpoint_config is None: - return - if isinstance(checkpoint_config, dict): - checkpoint_config.setdefault('type', 'CheckpointHook') - hook = mmcv.build_from_cfg(checkpoint_config, HOOKS) - else: - hook = checkpoint_config - self.register_hook(hook, priority='NORMAL') - - def register_logger_hooks(self, log_config): - if log_config is None: - return - log_interval = log_config['interval'] - for info in log_config['hooks']: - logger_hook = mmcv.build_from_cfg( - info, HOOKS, default_args=dict(interval=log_interval)) - self.register_hook(logger_hook, priority='VERY_LOW') - - def register_timer_hook(self, timer_config): - if timer_config is None: - return - if isinstance(timer_config, dict): - timer_config_ = copy.deepcopy(timer_config) - hook = mmcv.build_from_cfg(timer_config_, HOOKS) - else: - hook = timer_config - self.register_hook(hook, priority='LOW') - - def register_custom_hooks(self, custom_config): - if custom_config is None: - return - - if not isinstance(custom_config, list): - custom_config = [custom_config] - - for item in custom_config: - if isinstance(item, dict): - self.register_hook_from_cfg(item) - else: - self.register_hook(item, priority='NORMAL') - - def register_profiler_hook(self, profiler_config): - if profiler_config is None: - return - if isinstance(profiler_config, dict): - profiler_config.setdefault('type', 'ProfilerHook') - hook = mmcv.build_from_cfg(profiler_config, HOOKS) - else: - hook = profiler_config - self.register_hook(hook) - - def register_training_hooks(self, - lr_config, - optimizer_config=None, - checkpoint_config=None, - log_config=None, - momentum_config=None, - timer_config=dict(type='IterTimerHook'), - custom_hooks_config=None): - """Register default and custom hooks for training. 
- - Default and custom hooks include: - - +----------------------+-------------------------+ - | Hooks | Priority | - +======================+=========================+ - | LrUpdaterHook | VERY_HIGH (10) | - +----------------------+-------------------------+ - | MomentumUpdaterHook | HIGH (30) | - +----------------------+-------------------------+ - | OptimizerStepperHook | ABOVE_NORMAL (40) | - +----------------------+-------------------------+ - | CheckpointSaverHook | NORMAL (50) | - +----------------------+-------------------------+ - | IterTimerHook | LOW (70) | - +----------------------+-------------------------+ - | LoggerHook(s) | VERY_LOW (90) | - +----------------------+-------------------------+ - | CustomHook(s) | defaults to NORMAL (50) | - +----------------------+-------------------------+ - - If custom hooks have same priority with default hooks, custom hooks - will be triggered after default hooks. - """ - self.register_lr_hook(lr_config) - self.register_momentum_hook(momentum_config) - self.register_optimizer_hook(optimizer_config) - self.register_checkpoint_hook(checkpoint_config) - self.register_timer_hook(timer_config) - self.register_logger_hooks(log_config) - self.register_custom_hooks(custom_hooks_config) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/models/link.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/models/link.py deleted file mode 100644 index 4453519ad0202281cfa53b3ca2a0282a9b0a1799..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/models/link.py +++ /dev/null @@ -1,581 +0,0 @@ -import functools -import itertools -import logging -import os -import posixpath -import re -import urllib.parse -from dataclasses import dataclass -from typing import ( - TYPE_CHECKING, - Any, - Dict, - List, - Mapping, - NamedTuple, - Optional, - Tuple, - Union, -) - -from pip._internal.utils.deprecation import deprecated -from pip._internal.utils.filetypes import WHEEL_EXTENSION -from pip._internal.utils.hashes import Hashes -from pip._internal.utils.misc import ( - pairwise, - redact_auth_from_url, - split_auth_from_netloc, - splitext, -) -from pip._internal.utils.models import KeyBasedCompareMixin -from pip._internal.utils.urls import path_to_url, url_to_path - -if TYPE_CHECKING: - from pip._internal.index.collector import IndexContent - -logger = logging.getLogger(__name__) - - -# Order matters, earlier hashes have a precedence over later hashes for what -# we will pick to use. -_SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5") - - -@dataclass(frozen=True) -class LinkHash: - """Links to content may have embedded hash values. This class parses those. - - `name` must be any member of `_SUPPORTED_HASHES`. - - This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to - be JSON-serializable to conform to PEP 610, this class contains the logic for - parsing a hash name and value for correctness, and then checking whether that hash - conforms to a schema with `.is_hash_allowed()`.""" - - name: str - value: str - - _hash_url_fragment_re = re.compile( - # NB: we do not validate that the second group (.*) is a valid hex - # digest. Instead, we simply keep that string in this class, and then check it - # against Hashes when hash-checking is needed. 
This is easier to debug than - # proactively discarding an invalid hex digest, as we handle incorrect hashes - # and malformed hashes in the same place. - r"[#&]({choices})=([^&]*)".format( - choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES) - ), - ) - - def __post_init__(self) -> None: - assert self.name in _SUPPORTED_HASHES - - @classmethod - @functools.lru_cache(maxsize=None) - def find_hash_url_fragment(cls, url: str) -> Optional["LinkHash"]: - """Search a string for a checksum algorithm name and encoded output value.""" - match = cls._hash_url_fragment_re.search(url) - if match is None: - return None - name, value = match.groups() - return cls(name=name, value=value) - - def as_dict(self) -> Dict[str, str]: - return {self.name: self.value} - - def as_hashes(self) -> Hashes: - """Return a Hashes instance which checks only for the current hash.""" - return Hashes({self.name: [self.value]}) - - def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool: - """ - Return True if the current hash is allowed by `hashes`. - """ - if hashes is None: - return False - return hashes.is_hash_allowed(self.name, hex_digest=self.value) - - -@dataclass(frozen=True) -class MetadataFile: - """Information about a core metadata file associated with a distribution.""" - - hashes: Optional[Dict[str, str]] - - def __post_init__(self) -> None: - if self.hashes is not None: - assert all(name in _SUPPORTED_HASHES for name in self.hashes) - - -def supported_hashes(hashes: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]: - # Remove any unsupported hash types from the mapping. If this leaves no - # supported hashes, return None - if hashes is None: - return None - hashes = {n: v for n, v in hashes.items() if n in _SUPPORTED_HASHES} - if not hashes: - return None - return hashes - - -def _clean_url_path_part(part: str) -> str: - """ - Clean a "part" of a URL path (i.e. after splitting on "@" characters). - """ - # We unquote prior to quoting to make sure nothing is double quoted. - return urllib.parse.quote(urllib.parse.unquote(part)) - - -def _clean_file_url_path(part: str) -> str: - """ - Clean the first part of a URL path that corresponds to a local - filesystem path (i.e. the first part after splitting on "@" characters). - """ - # We unquote prior to quoting to make sure nothing is double quoted. - # Also, on Windows the path part might contain a drive letter which - # should not be quoted. On Linux where drive letters do not - # exist, the colon should be quoted. We rely on urllib.request - # to do the right thing here. - return urllib.request.pathname2url(urllib.request.url2pathname(part)) - - -# percent-encoded: / -_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE) - - -def _clean_url_path(path: str, is_local_path: bool) -> str: - """ - Clean the path portion of a URL. - """ - if is_local_path: - clean_func = _clean_file_url_path - else: - clean_func = _clean_url_path_part - - # Split on the reserved characters prior to cleaning so that - # revision strings in VCS URLs are properly preserved. - parts = _reserved_chars_re.split(path) - - cleaned_parts = [] - for to_clean, reserved in pairwise(itertools.chain(parts, [""])): - cleaned_parts.append(clean_func(to_clean)) - # Normalize %xx escapes (e.g. %2f -> %2F) - cleaned_parts.append(reserved.upper()) - - return "".join(cleaned_parts) - - -def _ensure_quoted_url(url: str) -> str: - """ - Make sure a link is fully quoted. 
- For example, if ' ' occurs in the URL, it will be replaced with "%20", - and without double-quoting other characters. - """ - # Split the URL into parts according to the general structure - # `scheme://netloc/path;parameters?query#fragment`. - result = urllib.parse.urlparse(url) - # If the netloc is empty, then the URL refers to a local filesystem path. - is_local_path = not result.netloc - path = _clean_url_path(result.path, is_local_path=is_local_path) - return urllib.parse.urlunparse(result._replace(path=path)) - - -class Link(KeyBasedCompareMixin): - """Represents a parsed link from a Package Index's simple URL""" - - __slots__ = [ - "_parsed_url", - "_url", - "_hashes", - "comes_from", - "requires_python", - "yanked_reason", - "metadata_file_data", - "cache_link_parsing", - "egg_fragment", - ] - - def __init__( - self, - url: str, - comes_from: Optional[Union[str, "IndexContent"]] = None, - requires_python: Optional[str] = None, - yanked_reason: Optional[str] = None, - metadata_file_data: Optional[MetadataFile] = None, - cache_link_parsing: bool = True, - hashes: Optional[Mapping[str, str]] = None, - ) -> None: - """ - :param url: url of the resource pointed to (href of the link) - :param comes_from: instance of IndexContent where the link was found, - or string. - :param requires_python: String containing the `Requires-Python` - metadata field, specified in PEP 345. This may be specified by - a data-requires-python attribute in the HTML link tag, as - described in PEP 503. - :param yanked_reason: the reason the file has been yanked, if the - file has been yanked, or None if the file hasn't been yanked. - This is the value of the "data-yanked" attribute, if present, in - a simple repository HTML link. If the file has been yanked but - no reason was provided, this should be the empty string. See - PEP 592 for more information and the specification. - :param metadata_file_data: the metadata attached to the file, or None if - no such metadata is provided. This argument, if not None, indicates - that a separate metadata file exists, and also optionally supplies - hashes for that file. - :param cache_link_parsing: A flag that is used elsewhere to determine - whether resources retrieved from this link should be cached. PyPI - URLs should generally have this set to False, for example. - :param hashes: A mapping of hash names to digests to allow us to - determine the validity of a download. - """ - - # The comes_from, requires_python, and metadata_file_data arguments are - # only used by classmethods of this class, and are not used in client - # code directly. - - # url can be a UNC windows share - if url.startswith("\\\\"): - url = path_to_url(url) - - self._parsed_url = urllib.parse.urlsplit(url) - # Store the url as a private attribute to prevent accidentally - # trying to set a new value. 
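        # (illustrative note added; not in the original source) for a URL like
        # 'https://files.example/pkg-1.0.tar.gz#sha256=abc123', the fragment
        # parser invoked below yields LinkHash(name='sha256', value='abc123'),
        # which is then merged into self._hashes alongside any explicit hashes.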
- self._url = url - - link_hash = LinkHash.find_hash_url_fragment(url) - hashes_from_link = {} if link_hash is None else link_hash.as_dict() - if hashes is None: - self._hashes = hashes_from_link - else: - self._hashes = {**hashes, **hashes_from_link} - - self.comes_from = comes_from - self.requires_python = requires_python if requires_python else None - self.yanked_reason = yanked_reason - self.metadata_file_data = metadata_file_data - - super().__init__(key=url, defining_class=Link) - - self.cache_link_parsing = cache_link_parsing - self.egg_fragment = self._egg_fragment() - - @classmethod - def from_json( - cls, - file_data: Dict[str, Any], - page_url: str, - ) -> Optional["Link"]: - """ - Convert an pypi json document from a simple repository page into a Link. - """ - file_url = file_data.get("url") - if file_url is None: - return None - - url = _ensure_quoted_url(urllib.parse.urljoin(page_url, file_url)) - pyrequire = file_data.get("requires-python") - yanked_reason = file_data.get("yanked") - hashes = file_data.get("hashes", {}) - - # PEP 714: Indexes must use the name core-metadata, but - # clients should support the old name as a fallback for compatibility. - metadata_info = file_data.get("core-metadata") - if metadata_info is None: - metadata_info = file_data.get("dist-info-metadata") - - # The metadata info value may be a boolean, or a dict of hashes. - if isinstance(metadata_info, dict): - # The file exists, and hashes have been supplied - metadata_file_data = MetadataFile(supported_hashes(metadata_info)) - elif metadata_info: - # The file exists, but there are no hashes - metadata_file_data = MetadataFile(None) - else: - # False or not present: the file does not exist - metadata_file_data = None - - # The Link.yanked_reason expects an empty string instead of a boolean. - if yanked_reason and not isinstance(yanked_reason, str): - yanked_reason = "" - # The Link.yanked_reason expects None instead of False. - elif not yanked_reason: - yanked_reason = None - - return cls( - url, - comes_from=page_url, - requires_python=pyrequire, - yanked_reason=yanked_reason, - hashes=hashes, - metadata_file_data=metadata_file_data, - ) - - @classmethod - def from_element( - cls, - anchor_attribs: Dict[str, Optional[str]], - page_url: str, - base_url: str, - ) -> Optional["Link"]: - """ - Convert an anchor element's attributes in a simple repository page to a Link. - """ - href = anchor_attribs.get("href") - if not href: - return None - - url = _ensure_quoted_url(urllib.parse.urljoin(base_url, href)) - pyrequire = anchor_attribs.get("data-requires-python") - yanked_reason = anchor_attribs.get("data-yanked") - - # PEP 714: Indexes must use the name data-core-metadata, but - # clients should support the old name as a fallback for compatibility. - metadata_info = anchor_attribs.get("data-core-metadata") - if metadata_info is None: - metadata_info = anchor_attribs.get("data-dist-info-metadata") - # The metadata info value may be the string "true", or a string of - # the form "hashname=hashval" - if metadata_info == "true": - # The file exists, but there are no hashes - metadata_file_data = MetadataFile(None) - elif metadata_info is None: - # The file does not exist - metadata_file_data = None - else: - # The file exists, and hashes have been supplied - hashname, sep, hashval = metadata_info.partition("=") - if sep == "=": - metadata_file_data = MetadataFile(supported_hashes({hashname: hashval})) - else: - # Error - data is wrong. Treat as no hashes supplied. 
- logger.debug( - "Index returned invalid data-dist-info-metadata value: %s", - metadata_info, - ) - metadata_file_data = MetadataFile(None) - - return cls( - url, - comes_from=page_url, - requires_python=pyrequire, - yanked_reason=yanked_reason, - metadata_file_data=metadata_file_data, - ) - - def __str__(self) -> str: - if self.requires_python: - rp = f" (requires-python:{self.requires_python})" - else: - rp = "" - if self.comes_from: - return "{} (from {}){}".format( - redact_auth_from_url(self._url), self.comes_from, rp - ) - else: - return redact_auth_from_url(str(self._url)) - - def __repr__(self) -> str: - return f"" - - @property - def url(self) -> str: - return self._url - - @property - def filename(self) -> str: - path = self.path.rstrip("/") - name = posixpath.basename(path) - if not name: - # Make sure we don't leak auth information if the netloc - # includes a username and password. - netloc, user_pass = split_auth_from_netloc(self.netloc) - return netloc - - name = urllib.parse.unquote(name) - assert name, f"URL {self._url!r} produced no filename" - return name - - @property - def file_path(self) -> str: - return url_to_path(self.url) - - @property - def scheme(self) -> str: - return self._parsed_url.scheme - - @property - def netloc(self) -> str: - """ - This can contain auth information. - """ - return self._parsed_url.netloc - - @property - def path(self) -> str: - return urllib.parse.unquote(self._parsed_url.path) - - def splitext(self) -> Tuple[str, str]: - return splitext(posixpath.basename(self.path.rstrip("/"))) - - @property - def ext(self) -> str: - return self.splitext()[1] - - @property - def url_without_fragment(self) -> str: - scheme, netloc, path, query, fragment = self._parsed_url - return urllib.parse.urlunsplit((scheme, netloc, path, query, "")) - - _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)") - - # Per PEP 508. - _project_name_re = re.compile( - r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE - ) - - def _egg_fragment(self) -> Optional[str]: - match = self._egg_fragment_re.search(self._url) - if not match: - return None - - # An egg fragment looks like a PEP 508 project name, along with - # an optional extras specifier. Anything else is invalid. 
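        # (example added for clarity; the fragments are invented):
        # '#egg=beautifulsoup4' passes the PEP 508 name check below, while
        # '#egg=bs4[lxml]>=4.0' does not and triggers the deprecation warning.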
- project_name = match.group(1) - if not self._project_name_re.match(project_name): - deprecated( - reason=f"{self} contains an egg fragment with a non-PEP 508 name", - replacement="to use the req @ url syntax, and remove the egg fragment", - gone_in="25.0", - issue=11617, - ) - - return project_name - - _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)") - - @property - def subdirectory_fragment(self) -> Optional[str]: - match = self._subdirectory_fragment_re.search(self._url) - if not match: - return None - return match.group(1) - - def metadata_link(self) -> Optional["Link"]: - """Return a link to the associated core metadata file (if any).""" - if self.metadata_file_data is None: - return None - metadata_url = f"{self.url_without_fragment}.metadata" - if self.metadata_file_data.hashes is None: - return Link(metadata_url) - return Link(metadata_url, hashes=self.metadata_file_data.hashes) - - def as_hashes(self) -> Hashes: - return Hashes({k: [v] for k, v in self._hashes.items()}) - - @property - def hash(self) -> Optional[str]: - return next(iter(self._hashes.values()), None) - - @property - def hash_name(self) -> Optional[str]: - return next(iter(self._hashes), None) - - @property - def show_url(self) -> str: - return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0]) - - @property - def is_file(self) -> bool: - return self.scheme == "file" - - def is_existing_dir(self) -> bool: - return self.is_file and os.path.isdir(self.file_path) - - @property - def is_wheel(self) -> bool: - return self.ext == WHEEL_EXTENSION - - @property - def is_vcs(self) -> bool: - from pip._internal.vcs import vcs - - return self.scheme in vcs.all_schemes - - @property - def is_yanked(self) -> bool: - return self.yanked_reason is not None - - @property - def has_hash(self) -> bool: - return bool(self._hashes) - - def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool: - """ - Return True if the link has a hash and it is allowed by `hashes`. - """ - if hashes is None: - return False - return any(hashes.is_hash_allowed(k, v) for k, v in self._hashes.items()) - - -class _CleanResult(NamedTuple): - """Convert link for equivalency check. - - This is used in the resolver to check whether two URL-specified requirements - likely point to the same distribution and can be considered equivalent. This - equivalency logic avoids comparing URLs literally, which can be too strict - (e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts unexpecting to users. - - Currently this does three things: - - 1. Drop the basic auth part. This is technically wrong since a server can - serve different content based on auth, but if it does that, it is even - impossible to guarantee two URLs without auth are equivalent, since - the user can input different auth information when prompted. So the - practical solution is to assume the auth doesn't affect the response. - 2. Parse the query to avoid the ordering issue. Note that ordering under the - same key in the query are NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1" are - still considered different. - 3. Explicitly drop most of the fragment part, except ``subdirectory=`` and - hash values, since it should have no impact the downloaded content. Note - that this drops the "egg=" part historically used to denote the requested - project (and extras), which is wrong in the strictest sense, but too many - people are supplying it inconsistently to cause superfluous resolution - conflicts, so we choose to also ignore them. 
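# --- Added sketch (standard library only; both URLs are invented): the core of
# --- the equivalence idea described above: parsing the query string makes key
# --- ordering irrelevant when two URL-specified requirements are compared.
import urllib.parse

a = urllib.parse.urlsplit("https://host/pkg?a=1&b=2")
b = urllib.parse.urlsplit("https://host/pkg?b=2&a=1")
equivalent = (a._replace(query="") == b._replace(query="")
              and urllib.parse.parse_qs(a.query) == urllib.parse.parse_qs(b.query))
print(equivalent)  # True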
- """ - - parsed: urllib.parse.SplitResult - query: Dict[str, List[str]] - subdirectory: str - hashes: Dict[str, str] - - -def _clean_link(link: Link) -> _CleanResult: - parsed = link._parsed_url - netloc = parsed.netloc.rsplit("@", 1)[-1] - # According to RFC 8089, an empty host in file: means localhost. - if parsed.scheme == "file" and not netloc: - netloc = "localhost" - fragment = urllib.parse.parse_qs(parsed.fragment) - if "egg" in fragment: - logger.debug("Ignoring egg= fragment in %s", link) - try: - # If there are multiple subdirectory values, use the first one. - # This matches the behavior of Link.subdirectory_fragment. - subdirectory = fragment["subdirectory"][0] - except (IndexError, KeyError): - subdirectory = "" - # If there are multiple hash values under the same algorithm, use the - # first one. This matches the behavior of Link.hash_value. - hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment} - return _CleanResult( - parsed=parsed._replace(netloc=netloc, query="", fragment=""), - query=urllib.parse.parse_qs(parsed.query), - subdirectory=subdirectory, - hashes=hashes, - ) - - -@functools.lru_cache(maxsize=None) -def links_equivalent(link1: Link, link2: Link) -> bool: - return _clean_link(link1) == _clean_link(link2) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/resolvelib/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/resolvelib/__init__.py deleted file mode 100644 index d92acc7bedfc5c7c05130986a256e610640582e5..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/resolvelib/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -__all__ = [ - "__version__", - "AbstractProvider", - "AbstractResolver", - "BaseReporter", - "InconsistentCandidate", - "Resolver", - "RequirementsConflicted", - "ResolutionError", - "ResolutionImpossible", - "ResolutionTooDeep", -] - -__version__ = "1.0.1" - - -from .providers import AbstractProvider, AbstractResolver -from .reporters import BaseReporter -from .resolvers import ( - InconsistentCandidate, - RequirementsConflicted, - ResolutionError, - ResolutionImpossible, - ResolutionTooDeep, - Resolver, -) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/launch.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/launch.py deleted file mode 100644 index 0208fdf33b640cd9791359d74673bb90cfb87f96..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/launch.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Launch the Python script on the command line after -setuptools is bootstrapped via import. -""" - -# Note that setuptools gets imported implicitly by the -# invocation of this script using python -m setuptools.launch - -import tokenize -import sys - - -def run(): - """ - Run the script in sys.argv[1] as if it had - been invoked naturally. 
- """ - __builtins__ - script_name = sys.argv[1] - namespace = dict( - __file__=script_name, - __name__='__main__', - __doc__=None, - ) - sys.argv[:] = sys.argv[1:] - - open_ = getattr(tokenize, 'open', open) - with open_(script_name) as fid: - script = fid.read() - norm_script = script.replace('\\r\\n', '\\n') - code = compile(norm_script, script_name, 'exec') - exec(code, namespace) - - -if __name__ == '__main__': - run() diff --git a/spaces/VideoCrafter/VideoCrafter/scripts/evaluation/funcs.py b/spaces/VideoCrafter/VideoCrafter/scripts/evaluation/funcs.py deleted file mode 100644 index f491835f8471340bf181d83cf4a166a1a8c6bfae..0000000000000000000000000000000000000000 --- a/spaces/VideoCrafter/VideoCrafter/scripts/evaluation/funcs.py +++ /dev/null @@ -1,194 +0,0 @@ -import os, sys, glob -import numpy as np -from collections import OrderedDict -from decord import VideoReader, cpu -import cv2 - -import torch -import torchvision -sys.path.insert(1, os.path.join(sys.path[0], '..', '..')) -from lvdm.models.samplers.ddim import DDIMSampler - - -def batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=50, ddim_eta=1.0,\ - cfg_scale=1.0, temporal_cfg_scale=None, **kwargs): - ddim_sampler = DDIMSampler(model) - uncond_type = model.uncond_type - batch_size = noise_shape[0] - - ## construct unconditional guidance - if cfg_scale != 1.0: - if uncond_type == "empty_seq": - prompts = batch_size * [""] - #prompts = N * T * [""] ## if is_imgbatch=True - uc_emb = model.get_learned_conditioning(prompts) - elif uncond_type == "zero_embed": - c_emb = cond["c_crossattn"][0] if isinstance(cond, dict) else cond - uc_emb = torch.zeros_like(c_emb) - - ## process image embedding token - if hasattr(model, 'embedder'): - uc_img = torch.zeros(noise_shape[0],3,224,224).to(model.device) - ## img: b c h w >> b l c - uc_img = model.get_image_embeds(uc_img) - uc_emb = torch.cat([uc_emb, uc_img], dim=1) - - if isinstance(cond, dict): - uc = {key:cond[key] for key in cond.keys()} - uc.update({'c_crossattn': [uc_emb]}) - else: - uc = uc_emb - else: - uc = None - - x_T = None - batch_variants = [] - #batch_variants1, batch_variants2 = [], [] - for _ in range(n_samples): - if ddim_sampler is not None: - kwargs.update({"clean_cond": True}) - samples, _ = ddim_sampler.sample(S=ddim_steps, - conditioning=cond, - batch_size=noise_shape[0], - shape=noise_shape[1:], - verbose=False, - unconditional_guidance_scale=cfg_scale, - unconditional_conditioning=uc, - eta=ddim_eta, - temporal_length=noise_shape[2], - conditional_guidance_scale_temporal=temporal_cfg_scale, - x_T=x_T, - **kwargs - ) - ## reconstruct from latent to pixel space - batch_images = model.decode_first_stage_2DAE(samples) - batch_variants.append(batch_images) - ## batch, , c, t, h, w - batch_variants = torch.stack(batch_variants, dim=1) - return batch_variants - - -def get_filelist(data_dir, ext='*'): - file_list = glob.glob(os.path.join(data_dir, '*.%s'%ext)) - file_list.sort() - return file_list - -def get_dirlist(path): - list = [] - if (os.path.exists(path)): - files = os.listdir(path) - for file in files: - m = os.path.join(path,file) - if (os.path.isdir(m)): - list.append(m) - list.sort() - return list - - -def load_model_checkpoint(model, ckpt): - def load_checkpoint(model, ckpt, full_strict): - state_dict = torch.load(ckpt, map_location="cpu") - try: - ## deepspeed - new_pl_sd = OrderedDict() - for key in state_dict['module'].keys(): - new_pl_sd[key[16:]]=state_dict['module'][key] - model.load_state_dict(new_pl_sd, strict=full_strict) - except: 
- if "state_dict" in list(state_dict.keys()): - state_dict = state_dict["state_dict"] - model.load_state_dict(state_dict, strict=full_strict) - return model - load_checkpoint(model, ckpt, full_strict=True) - print('>>> model checkpoint loaded.') - return model - - -def load_prompts(prompt_file): - f = open(prompt_file, 'r') - prompt_list = [] - for idx, line in enumerate(f.readlines()): - l = line.strip() - if len(l) != 0: - prompt_list.append(l) - f.close() - return prompt_list - - -def load_video_batch(filepath_list, frame_stride, video_size=(256,256), video_frames=16): - ''' - Notice about some special cases: - 1. video_frames=-1 means to take all the frames (with fs=1) - 2. when the total video frames is less than required, padding strategy will be used (repreated last frame) - ''' - fps_list = [] - batch_tensor = [] - assert frame_stride > 0, "valid frame stride should be a positive interge!" - for filepath in filepath_list: - padding_num = 0 - vidreader = VideoReader(filepath, ctx=cpu(0), width=video_size[1], height=video_size[0]) - fps = vidreader.get_avg_fps() - total_frames = len(vidreader) - max_valid_frames = (total_frames-1) // frame_stride + 1 - if video_frames < 0: - ## all frames are collected: fs=1 is a must - required_frames = total_frames - frame_stride = 1 - else: - required_frames = video_frames - query_frames = min(required_frames, max_valid_frames) - frame_indices = [frame_stride*i for i in range(query_frames)] - - ## [t,h,w,c] -> [c,t,h,w] - frames = vidreader.get_batch(frame_indices) - frame_tensor = torch.tensor(frames.asnumpy()).permute(3, 0, 1, 2).float() - frame_tensor = (frame_tensor / 255. - 0.5) * 2 - if max_valid_frames < required_frames: - padding_num = required_frames - max_valid_frames - frame_tensor = torch.cat([frame_tensor, *([frame_tensor[:,-1:,:,:]]*padding_num)], dim=1) - print(f'{os.path.split(filepath)[1]} is not long enough: {padding_num} frames padded.') - batch_tensor.append(frame_tensor) - sample_fps = int(fps/frame_stride) - fps_list.append(sample_fps) - - return torch.stack(batch_tensor, dim=0) - -from PIL import Image -def load_image_batch(filepath_list, image_size=(256,256)): - batch_tensor = [] - for filepath in filepath_list: - _, filename = os.path.split(filepath) - _, ext = os.path.splitext(filename) - if ext == '.mp4': - vidreader = VideoReader(filepath, ctx=cpu(0), width=image_size[1], height=image_size[0]) - frame = vidreader.get_batch([0]) - img_tensor = torch.tensor(frame.asnumpy()).squeeze(0).permute(2, 0, 1).float() - elif ext == '.png' or ext == '.jpg': - img = Image.open(filepath).convert("RGB") - rgb_img = np.array(img, np.float32) - #bgr_img = cv2.imread(filepath, cv2.IMREAD_COLOR) - #bgr_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB) - rgb_img = cv2.resize(rgb_img, (image_size[1],image_size[0]), interpolation=cv2.INTER_LINEAR) - img_tensor = torch.from_numpy(rgb_img).permute(2, 0, 1).float() - else: - print(f'ERROR: <{ext}> image loading only support format: [mp4], [png], [jpg]') - raise NotImplementedError - img_tensor = (img_tensor / 255. - 0.5) * 2 - batch_tensor.append(img_tensor) - return torch.stack(batch_tensor, dim=0) - - -def save_videos(batch_tensors, savedir, filenames, fps=10): - # b,samples,c,t,h,w - n_samples = batch_tensors.shape[1] - for idx, vid_tensor in enumerate(batch_tensors): - video = vid_tensor.detach().cpu() - video = torch.clamp(video.float(), -1., 1.) 
- video = video.permute(2, 0, 1, 3, 4) # t,n,c,h,w - frame_grids = [torchvision.utils.make_grid(framesheet, nrow=int(n_samples)) for framesheet in video] #[3, 1*h, n*w] - grid = torch.stack(frame_grids, dim=0) # stack in temporal dim [t, 3, n*h, w] - grid = (grid + 1.0) / 2.0 - grid = (grid * 255).to(torch.uint8).permute(0, 2, 3, 1) - savepath = os.path.join(savedir, f"{filenames[idx]}.mp4") - torchvision.io.write_video(savepath, grid, fps=fps, video_codec='h264', options={'crf': '10'}) - diff --git a/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py b/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py deleted file mode 100644 index 87d741390b3c12e174ae5ddbe71436a5a8ab8718..0000000000000000000000000000000000000000 --- a/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py +++ /dev/null @@ -1,231 +0,0 @@ -import cv2 -import gradio as gr -import numpy as np -import torch -from diffusers import ControlNetModel -from PIL import Image - -from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import ( - StableDiffusionControlNetInpaintPipeline, -) -from diffusion_webui.utils.model_list import ( - controlnet_canny_model_list, - stable_inpiant_model_list, -) -from diffusion_webui.utils.scheduler_list import ( - SCHEDULER_LIST, - get_scheduler_list, -) - -# https://github.com/mikonvergence/ControlNetInpaint - - -class StableDiffusionControlNetInpaintCannyGenerator: - def __init__(self): - self.pipe = None - - def load_model(self, stable_model_path, controlnet_model_path, scheduler): - if self.pipe is None: - controlnet = ControlNetModel.from_pretrained( - controlnet_model_path, torch_dtype=torch.float16 - ) - self.pipe = ( - StableDiffusionControlNetInpaintPipeline.from_pretrained( - pretrained_model_name_or_path=stable_model_path, - controlnet=controlnet, - safety_checker=None, - torch_dtype=torch.float16, - ) - ) - - self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler) - self.pipe.to("cuda") - self.pipe.enable_xformers_memory_efficient_attention() - - return self.pipe - - def load_image(self, image_path): - image = np.array(image_path) - image = Image.fromarray(image) - return image - - def controlnet_canny_inpaint( - self, - image_path: str, - ): - image = image_path["image"].convert("RGB").resize((512, 512)) - image = np.array(image) - - image = cv2.Canny(image, 100, 200) - image = image[:, :, None] - image = np.concatenate([image, image, image], axis=2) - image = Image.fromarray(image) - - return image - - def generate_image( - self, - image_path: str, - stable_model_path: str, - controlnet_model_path: str, - prompt: str, - negative_prompt: str, - num_images_per_prompt: int, - guidance_scale: int, - num_inference_step: int, - controlnet_conditioning_scale: int, - scheduler: str, - seed_generator: int, - ): - - normal_image = image_path["image"].convert("RGB").resize((512, 512)) - mask_image = image_path["mask"].convert("RGB").resize((512, 512)) - - normal_image = self.load_image(image_path=normal_image) - mask_image = self.load_image(image_path=mask_image) - - control_image = self.controlnet_canny_inpaint(image_path=image_path) - pipe = self.load_model( - stable_model_path=stable_model_path, - controlnet_model_path=controlnet_model_path, - 
scheduler=scheduler, - ) - - if seed_generator == 0: - random_seed = torch.randint(0, 1000000, (1,)) - generator = torch.manual_seed(random_seed) - else: - generator = torch.manual_seed(seed_generator) - - output = pipe( - prompt=prompt, - image=normal_image, - mask_image=mask_image, - control_image=control_image, - negative_prompt=negative_prompt, - num_images_per_prompt=num_images_per_prompt, - num_inference_steps=num_inference_step, - guidance_scale=guidance_scale, - controlnet_conditioning_scale=controlnet_conditioning_scale, - generator=generator, - ).images - - return output - - def app(): - with gr.Blocks(): - with gr.Row(): - with gr.Column(): - controlnet_canny_inpaint_image_file = gr.Image( - source="upload", - tool="sketch", - elem_id="image_upload", - type="pil", - label="Upload", - ) - - controlnet_canny_inpaint_prompt = gr.Textbox( - lines=1, placeholder="Prompt", show_label=False - ) - - controlnet_canny_inpaint_negative_prompt = gr.Textbox( - lines=1, - show_label=False, - placeholder="Negative Prompt", - ) - with gr.Row(): - with gr.Column(): - controlnet_canny_inpaint_stable_model_id = ( - gr.Dropdown( - choices=stable_inpiant_model_list, - value=stable_inpiant_model_list[0], - label="Stable Model Id", - ) - ) - - controlnet_canny_inpaint_guidance_scale = gr.Slider( - minimum=0.1, - maximum=15, - step=0.1, - value=7.5, - label="Guidance Scale", - ) - - controlnet_canny_inpaint_num_inference_step = ( - gr.Slider( - minimum=1, - maximum=100, - step=1, - value=50, - label="Num Inference Step", - ) - ) - controlnet_canny_inpaint_num_images_per_prompt = ( - gr.Slider( - minimum=1, - maximum=10, - step=1, - value=1, - label="Number Of Images", - ) - ) - with gr.Row(): - with gr.Column(): - controlnet_canny_inpaint_model_id = gr.Dropdown( - choices=controlnet_canny_model_list, - value=controlnet_canny_model_list[0], - label="Controlnet Model Id", - ) - controlnet_canny_inpaint_scheduler = ( - gr.Dropdown( - choices=SCHEDULER_LIST, - value=SCHEDULER_LIST[0], - label="Scheduler", - ) - ) - controlnet_canny_inpaint_controlnet_conditioning_scale = gr.Slider( - minimum=0.1, - maximum=1.0, - step=0.1, - value=0.5, - label="Controlnet Conditioning Scale", - ) - - controlnet_canny_inpaint_seed_generator = ( - gr.Slider( - minimum=0, - maximum=1000000, - step=1, - value=0, - label="Seed Generator", - ) - ) - - controlnet_canny_inpaint_predict = gr.Button( - value="Generator" - ) - - with gr.Column(): - output_image = gr.Gallery( - label="Generated images", - show_label=False, - elem_id="gallery", - ).style(grid=(1, 2)) - - controlnet_canny_inpaint_predict.click( - fn=StableDiffusionControlNetInpaintCannyGenerator().generate_image, - inputs=[ - controlnet_canny_inpaint_image_file, - controlnet_canny_inpaint_stable_model_id, - controlnet_canny_inpaint_model_id, - controlnet_canny_inpaint_prompt, - controlnet_canny_inpaint_negative_prompt, - controlnet_canny_inpaint_num_images_per_prompt, - controlnet_canny_inpaint_guidance_scale, - controlnet_canny_inpaint_num_inference_step, - controlnet_canny_inpaint_controlnet_conditioning_scale, - controlnet_canny_inpaint_scheduler, - controlnet_canny_inpaint_seed_generator, - ], - outputs=[output_image], - ) diff --git a/spaces/Wootang01/Paraphraser_two/app.py b/spaces/Wootang01/Paraphraser_two/app.py deleted file mode 100644 index ea3ed055ed9857bbc92320c539efe48738a0a78a..0000000000000000000000000000000000000000 --- a/spaces/Wootang01/Paraphraser_two/app.py +++ /dev/null @@ -1,40 +0,0 @@ -import streamlit as st -from transformers import 
PegasusForConditionalGeneration, PegasusTokenizer - -st.title("Paraphrase with Pegasus") -st.write("Paraphrase means to express meaning using different words. Pegasus refers to a natural language processing model.") -st.write("Write or paste text below, select a number of paraphrases and enter. The machine will attempt to produce your selected number of paraphrases. You can also select advanced features.") - -model_name = "tuner007/pegasus_paraphrase" -torch_device = "cpu" -tokenizer = PegasusTokenizer.from_pretrained(model_name) - - -@st.cache(allow_output_mutation=True) -def load_model(): - model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device) - return model - -def get_response( - input_text, num_return_sequences, num_beams, max_length=512, temperature=1.5 -): - - model = load_model() - batch = tokenizer([input_text], truncation=True, padding="longest", max_length=max_length, return_tensors="pt").to(torch_device) - translated = model.generate(**batch, max_length=max_length, num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=temperature) - tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True) - return tgt_text - -num_beams = 10 -num_return_sequences = st.slider("Number of paraphrases", 1, 5, 3, 1) -context = st.text_area(label="Write or paste text", max_chars=512) - -with st.expander("Advanced"): - temperature = st.slider("Temperature", 0.1, 5.0, 1.5, 0.1) - max_length = st.slider("Max length", 10, 512, 256, 10) - -if context: - response = get_response(context, num_return_sequences, num_beams, max_length, temperature) - for paraphrase in response: - st.write(paraphrase) - diff --git a/spaces/Wrathless/Dkrotzer-MusicalMagic/audiocraft/models/__init__.py b/spaces/Wrathless/Dkrotzer-MusicalMagic/audiocraft/models/__init__.py deleted file mode 100644 index 92c7a48a200eba455044cd66e0d2c1efe6494f5c..0000000000000000000000000000000000000000 --- a/spaces/Wrathless/Dkrotzer-MusicalMagic/audiocraft/models/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-
-# flake8: noqa
-from .musicgen import MusicGen
-from .lm import LMModel
-from .encodec import CompressionModel, EncodecModel
diff --git a/spaces/XPMaster/chainladder/README.md b/spaces/XPMaster/chainladder/README.md
deleted file mode 100644
index 4c454a609d8eec231e76ff208d38773bf799bda9..0000000000000000000000000000000000000000
--- a/spaces/XPMaster/chainladder/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Chainladder
-emoji: 📊
-colorFrom: blue
-colorTo: gray
-sdk: gradio
-sdk_version: 3.36.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/XzJosh/Bella-Bert-VITS2/text/chinese_bert.py b/spaces/XzJosh/Bella-Bert-VITS2/text/chinese_bert.py
deleted file mode 100644
index cb84ce0b426cd0a1c7954ddcdf41322c10ed14fa..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Bella-Bert-VITS2/text/chinese_bert.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import torch
-from transformers import AutoTokenizer, AutoModelForMaskedLM
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-tokenizer = AutoTokenizer.from_pretrained("./bert/chinese-roberta-wwm-ext-large")
-model = AutoModelForMaskedLM.from_pretrained("./bert/chinese-roberta-wwm-ext-large").to(device)
-
-def get_bert_feature(text, word2ph):
-    with torch.no_grad():
-        inputs = tokenizer(text, return_tensors='pt')
-        for i in inputs:
-            inputs[i] = inputs[i].to(device)
-        res = model(**inputs, output_hidden_states=True)
-        res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu()
-
-    assert len(word2ph) == len(text)+2
-    word2phone = word2ph
-    phone_level_feature = []
-    for i in range(len(word2phone)):
-        repeat_feature = res[i].repeat(word2phone[i], 1)
-        phone_level_feature.append(repeat_feature)
-
-    phone_level_feature = torch.cat(phone_level_feature, dim=0)
-
-
-    return phone_level_feature.T
-
-if __name__ == '__main__':
-    # feature = get_bert_feature('你好,我是说的道理。')
-    import torch
-
-    word_level_feature = torch.rand(38, 1024)  # 38 words, each with a 1024-dim feature
-    word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1]
-
-    # compute the total number of frames
-    total_frames = sum(word2phone)
-    print(word_level_feature.shape)
-    print(word2phone)
-    phone_level_feature = []
-    for i in range(len(word2phone)):
-        print(word_level_feature[i].shape)
-
-        # repeat each word's feature word2phone[i] times
-        repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
-        phone_level_feature.append(repeat_feature)
-
-    phone_level_feature = torch.cat(phone_level_feature, dim=0)
-    print(phone_level_feature.shape)  # torch.Size([65, 1024])
-
diff --git a/spaces/XzJosh/Jianmo-Bert-VITS2/monotonic_align/__init__.py b/spaces/XzJosh/Jianmo-Bert-VITS2/monotonic_align/__init__.py
deleted file mode 100644
index 75603d26cf2b8d6196f5a68a89f9e49d8e519bc8..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Jianmo-Bert-VITS2/monotonic_align/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from numpy import zeros, int32, float32
-from torch import from_numpy
-
-from .core import maximum_path_jit
-
-def maximum_path(neg_cent, mask):
-    device = neg_cent.device
-    dtype = neg_cent.dtype
-    neg_cent = neg_cent.data.cpu().numpy().astype(float32)
-    path = zeros(neg_cent.shape, dtype=int32)
-
-    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
-    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
-    maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
-    return from_numpy(path).to(device=device, dtype=dtype)
diff --git
a/spaces/YONG627/456123/yolov5-code-main/hubconf.py b/spaces/YONG627/456123/yolov5-code-main/hubconf.py deleted file mode 100644 index 41af8e39d14deba8679400d02c192696bcf37544..0000000000000000000000000000000000000000 --- a/spaces/YONG627/456123/yolov5-code-main/hubconf.py +++ /dev/null @@ -1,169 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 - -Usage: - import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model - model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch - model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model - model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo -""" - -import torch - - -def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): - """Creates or loads a YOLOv5 model - - Arguments: - name (str): model name 'yolov5s' or path 'path/to/best.pt' - pretrained (bool): load pretrained weights into the model - channels (int): number of input channels - classes (int): number of model classes - autoshape (bool): apply YOLOv5 .autoshape() wrapper to model - verbose (bool): print all information to screen - device (str, torch.device, None): device to use for model parameters - - Returns: - YOLOv5 model - """ - from pathlib import Path - - from models.common import AutoShape, DetectMultiBackend - from models.experimental import attempt_load - from models.yolo import ClassificationModel, DetectionModel, SegmentationModel - from utils.downloads import attempt_download - from utils.general import LOGGER, check_requirements, intersect_dicts, logging - from utils.torch_utils import select_device - - if not verbose: - LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) - name = Path(name) - path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path - try: - device = select_device(device) - if pretrained and channels == 3 and classes == 80: - try: - model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model - if autoshape: - if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' - 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') - elif model.pt and isinstance(model.model, SegmentationModel): - LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. 
' - 'You will not be able to run inference with this model.') - else: - model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS - except Exception: - model = attempt_load(path, device=device, fuse=False) # arbitrary model - else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path - model = DetectionModel(cfg, channels, classes) # create model - if pretrained: - ckpt = torch.load(attempt_download(path), map_location=device) # load - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect - model.load_state_dict(csd, strict=False) # load - if len(ckpt['model'].names) == classes: - model.names = ckpt['model'].names # set class names attribute - if not verbose: - LOGGER.setLevel(logging.INFO) # reset to default - return model.to(device) - - except Exception as e: - help_url = 'https://github.com/ultralytics/yolov5/issues/36' - s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' - raise Exception(s) from e - - -def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None): - # YOLOv5 custom or local model - return _create(path, autoshape=autoshape, verbose=_verbose, device=device) - - -def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano model https://github.com/ultralytics/yolov5 - return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small model https://github.com/ultralytics/yolov5 - return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large model https://github.com/ultralytics/yolov5 - return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5l6', pretrained, channels, 
classes, autoshape, _verbose, device) - - -def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device) - - -if __name__ == '__main__': - import argparse - from pathlib import Path - - import numpy as np - from PIL import Image - - from utils.general import cv2, print_args - - # Argparser - parser = argparse.ArgumentParser() - parser.add_argument('--model', type=str, default='yolov5s', help='model name') - opt = parser.parse_args() - print_args(vars(opt)) - - # Model - model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) - # model = custom(path='path/to/model.pt') # custom - - # Images - imgs = [ - 'data/images/zidane.jpg', # filename - Path('data/images/zidane.jpg'), # Path - 'https://ultralytics.com/images/zidane.jpg', # URI - cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV - Image.open('data/images/bus.jpg'), # PIL - np.zeros((320, 640, 3))] # numpy - - # Inference - results = model(imgs, size=320) # batched inference - - # Results - results.print() - results.save() diff --git a/spaces/YanzBotz/stablediffusionapi-disney-pixar-cartoon/README.md b/spaces/YanzBotz/stablediffusionapi-disney-pixar-cartoon/README.md deleted file mode 100644 index 972dbcc71efa6e74e346b3968915868ac6162282..0000000000000000000000000000000000000000 --- a/spaces/YanzBotz/stablediffusionapi-disney-pixar-cartoon/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stablediffusionapi Disney Pixar Cartoon -emoji: 🦀 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 4.1.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/__init__.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/__init__.py deleted file mode 100644 index 85c9f1a9df8a4038fbd4246239b699402e382309..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .distributed_sampler import ( - InferenceSampler, - RandomSubsetTrainingSampler, - RepeatFactorTrainingSampler, - TrainingSampler, -) - -from .grouped_batch_sampler import GroupedBatchSampler - -__all__ = [ - "GroupedBatchSampler", - "TrainingSampler", - "RandomSubsetTrainingSampler", - "InferenceSampler", - "RepeatFactorTrainingSampler", -] diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp deleted file mode 100644 index c843487b5fa4e8077dd27402ec99009266ddda8d..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. 
-#include "box_iou_rotated.h" -#include "box_iou_rotated_utils.h" - -namespace detectron2 { - -template -void box_iou_rotated_cpu_kernel( - const at::Tensor& boxes1, - const at::Tensor& boxes2, - at::Tensor& ious) { - auto num_boxes1 = boxes1.size(0); - auto num_boxes2 = boxes2.size(0); - - for (int i = 0; i < num_boxes1; i++) { - for (int j = 0; j < num_boxes2; j++) { - ious[i * num_boxes2 + j] = single_box_iou_rotated( - boxes1[i].data_ptr(), boxes2[j].data_ptr()); - } - } -} - -at::Tensor box_iou_rotated_cpu( - // input must be contiguous: - const at::Tensor& boxes1, - const at::Tensor& boxes2) { - auto num_boxes1 = boxes1.size(0); - auto num_boxes2 = boxes2.size(0); - at::Tensor ious = - at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); - - box_iou_rotated_cpu_kernel(boxes1, boxes2, ious); - - // reshape from 1d array to 2d array - auto shape = std::vector{num_boxes1, num_boxes2}; - return ious.reshape(shape); -} - -} // namespace detectron2 diff --git a/spaces/YuxinJ/Scenimefy/Scenimefy/models/cut_model.py b/spaces/YuxinJ/Scenimefy/Scenimefy/models/cut_model.py deleted file mode 100644 index 95be2d7ce0351b81c44871c532aceca110cd12eb..0000000000000000000000000000000000000000 --- a/spaces/YuxinJ/Scenimefy/Scenimefy/models/cut_model.py +++ /dev/null @@ -1,370 +0,0 @@ -import numpy as np -import torch -from Scenimefy.models.base_model import BaseModel -from Scenimefy.models import networks -from Scenimefy.models.patchnce import PatchNCELoss -import Scenimefy.utils.util as util -from torch.distributions.beta import Beta -from torch.nn import functional as F -from Scenimefy.models.hDCE import PatchHDCELoss -from Scenimefy.models.SRC import SRC_Loss -import torch.nn as nn - - -def show_np_r(array, min, max, num): - plt.figure(num) - plt.imshow(array, norm=None, cmap='gray', vmin= min, vmax=max) - plt.axis('off') - plt.show() - -def show_hot_r(array, num): - plt.figure(num) - plt.imshow(array, norm=None, cmap='hot') - plt.axis('off') - plt.show() - -def show_torch_rgb(array, min, max, num): - plt.figure(num) - plt.imshow(array.detach().cpu()[0].permute(1,2,0).numpy()*255, norm=None, cmap='gray', vmin= min, vmax=max) - plt.axis('off') - plt.show() - - -class Normalize(nn.Module): - - def __init__(self, power=2): - super(Normalize, self).__init__() - self.power = power - - def forward(self, x): - norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power) - out = x.div(norm + 1e-7) - return out - -def get_lambda(alpha=1.0,size=None,device=None): - '''Return lambda''' - if alpha > 0.: - lam = np.random.beta(alpha, alpha) -# lam = Beta() - else: - lam = 1. - return lam -def get_spa_lambda(alpha=1.0,size=None,device=None): - '''Return lambda''' - if alpha > 0.: - lam = torch.from_numpy(np.random.beta(alpha, alpha,size=size)).float().to(device) -# lam = Beta() - else: - lam = 1. - return lam -class CUTModel(BaseModel): - """ This class implements CUT and FastCUT model, described in the paper - Contrastive Learning for Unpaired Image-to-Image Translation - Taesung Park, Alexei A. 
Efros, Richard Zhang, Jun-Yan Zhu - ECCV, 2020 - - The code borrows heavily from the PyTorch implementation of CycleGAN - https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix - """ - @staticmethod - def modify_commandline_options(parser, is_train=True): - """ Configures options specific for CUT model - """ - parser.add_argument('--CUT_mode', type=str, default="CUT", choices='(CUT, cut, FastCUT, fastcut)') - - parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss:GAN(G(X))') - parser.add_argument('--lambda_HDCE', type=float, default=1.0, help='weight for HDCE loss: HDCE(G(X), X)') - parser.add_argument('--lambda_SRC', type=float, default=1.0, help='weight for SRC loss: SRC(G(X), X)') - parser.add_argument('--dce_idt', action='store_true') - parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers') - parser.add_argument('--nce_includes_all_negatives_from_minibatch', - type=util.str2bool, nargs='?', const=True, default=False, - help='(used for single image translation) If True, include the negatives from the other samples of the minibatch when computing the contrastive loss. Please see models/patchnce.py for more details.') - parser.add_argument('--netF', type=str, default='mlp_sample', choices=['sample', 'reshape', 'mlp_sample'], help='how to downsample the feature map') - parser.add_argument('--netF_nc', type=int, default=256) - parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss') - parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer') - parser.add_argument('--flip_equivariance', - type=util.str2bool, nargs='?', const=True, default=False, - help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT") - parser.add_argument('--alpha', type=float, default=0.2) - parser.add_argument('--use_curriculum', action='store_true') - parser.add_argument('--HDCE_gamma', type=float, default=1) - parser.add_argument('--HDCE_gamma_min', type=float, default=1) - parser.add_argument('--step_gamma', action='store_true') - parser.add_argument('--step_gamma_epoch', type=int, default=200) - parser.add_argument('--no_Hneg', action='store_true') - - parser.set_defaults(pool_size=0) # no image pooling - - opt, _ = parser.parse_known_args() - - return parser - - def __init__(self, opt): - BaseModel.__init__(self, opt) - - self.train_epoch = None - - # specify the training losses you want to print out. 
-        # The training/test scripts will call <BaseModel.get_current_losses>
-        self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G']
-
-        if opt.lambda_HDCE > 0.0:
-            self.loss_names.append('HDCE')
-            if opt.dce_idt and self.isTrain:
-                self.loss_names += ['HDCE_Y']
-
-        if opt.lambda_SRC > 0.0:
-            self.loss_names.append('SRC')
-
-
-        self.visual_names = ['real_A', 'fake_B', 'real_B']
-        self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]
-        self.alpha = opt.alpha
-        if opt.dce_idt and self.isTrain:
-            self.visual_names += ['idt_B']
-
-        if self.isTrain:
-            self.model_names = ['G', 'F', 'D']
-        else:  # during test time, only load G
-            self.model_names = ['G']
-        # define networks (both generator and discriminator)
-        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
-        self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
-
-
-        if self.isTrain:
-            self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
-
-            # define loss functions
-            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
-            self.criterionNCE = []
-            self.criterionHDCE = []
-
-            for i, nce_layer in enumerate(self.nce_layers):
-                self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
-                self.criterionHDCE.append(PatchHDCELoss(opt=opt).to(self.device))
-
-            self.criterionIdt = torch.nn.L1Loss().to(self.device)
-            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
-            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
-            self.optimizers.append(self.optimizer_G)
-            self.optimizers.append(self.optimizer_D)
-
-            self.criterionR = []
-            for nce_layer in self.nce_layers:
-                self.criterionR.append(SRC_Loss(opt).to(self.device))
-
-
-    def data_dependent_initialize(self, data):
-        """
-        The feature network netF is defined in terms of the shape of the intermediate, extracted
-        features of the encoder portion of netG. Because of this, the weights of netF are
-        initialized at the first feedforward pass with some input images.
-        Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
- """ - self.set_input(data) - bs_per_gpu = self.real_A.size(0) // max(len(self.opt.gpu_ids), 1) - self.real_A = self.real_A[:bs_per_gpu] - self.real_B = self.real_B[:bs_per_gpu] - self.forward() # compute fake images: G(A) - if self.opt.isTrain: - self.compute_D_loss().backward() # calculate gradients for D - self.compute_G_loss().backward() # calculate graidents for G - # if self.opt.lambda_NCE > 0.0: - # self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2)) - # self.optimizers.append(self.optimizer_F) - # - # elif self.opt.lambda_HDCE > 0.0: - self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2)) - self.optimizers.append(self.optimizer_F) - - - def optimize_parameters(self): - # forward - self.forward() - - # update D - self.set_requires_grad(self.netD, True) - self.optimizer_D.zero_grad() - self.loss_D = self.compute_D_loss() - self.loss_D.backward() - self.optimizer_D.step() - - # update G - self.set_requires_grad(self.netD, False) - self.optimizer_G.zero_grad() - if self.opt.netF == 'mlp_sample': - # if self.opt.lambda_NCE > 0.0: - # self.optimizer_F.zero_grad() - # elif self.opt.lambda_HDCE > 0.0: - self.optimizer_F.zero_grad() - self.loss_G = self.compute_G_loss() - self.loss_G.backward() - self.optimizer_G.step() - if self.opt.netF == 'mlp_sample': - # if self.opt.lambda_NCE > 0.0: - # self.optimizer_F.step() - # elif self.opt.lambda_HDCE > 0.0: - self.optimizer_F.step() - - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - Parameters: - input (dict): include the data itself and its metadata information. - The option 'direction' can be used to swap domain A and domain B. 
- """ - AtoB = self.opt.direction == 'AtoB' - self.real_A = input['A' if AtoB else 'B'].to(self.device) - self.real_B = input['B' if AtoB else 'A'].to(self.device) - self.image_paths = input['A_paths' if AtoB else 'B_paths'] - - def forward(self): - """Run forward pass; called by both functions and .""" - self.real = torch.cat((self.real_A, self.real_B), dim=0) if self.opt.dce_idt and self.opt.isTrain else self.real_A - if self.opt.flip_equivariance: - self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5) - if self.flipped_for_equivariance: - self.real = torch.flip(self.real, [3]) - - self.fake = self.netG(self.real) - self.fake_B = self.fake[:self.real_A.size(0)] - if self.opt.dce_idt: - self.idt_B = self.fake[self.real_A.size(0):] - - - def set_epoch(self, epoch): - self.train_epoch = epoch - - def compute_D_loss(self): - """Calculate GAN loss for the discriminator""" - fake = self.fake_B.detach() - # Fake; stop backprop to the generator by detaching fake_B - pred_fake = self.netD(fake) - self.loss_D_fake = self.criterionGAN(pred_fake, False).mean() - # Real - self.pred_real = self.netD(self.real_B) - loss_D_real = self.criterionGAN(self.pred_real, True) - self.loss_D_real = loss_D_real.mean() - - # combine loss and calculate gradients - self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 - return self.loss_D - - def compute_G_loss(self): - """Calculate GAN and NCE loss for the generator""" - fake = self.fake_B - # First, G(A) should fake the discriminator - if self.opt.lambda_GAN > 0.0: - pred_fake = self.netD(fake) - self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN - else: - self.loss_G_GAN = 0.0 - - ## get feat - fake_B_feat = self.netG(self.fake_B, self.nce_layers, encode_only=True) - if self.opt.flip_equivariance and self.flipped_for_equivariance: - fake_B_feat = [torch.flip(fq, [3]) for fq in fake_B_feat] - real_A_feat = self.netG(self.real_A, self.nce_layers, encode_only=True) - - fake_B_pool, sample_ids = self.netF(fake_B_feat, self.opt.num_patches, None) - real_A_pool, _ = self.netF(real_A_feat, self.opt.num_patches, sample_ids) - - if self.opt.dce_idt: - idt_B_feat = self.netG(self.idt_B, self.nce_layers, encode_only=True) - if self.opt.flip_equivariance and self.flipped_for_equivariance: - idt_B_feat = [torch.flip(fq, [3]) for fq in idt_B_feat] - real_B_feat = self.netG(self.real_B, self.nce_layers, encode_only=True) - - idt_B_pool, _ = self.netF(idt_B_feat, self.opt.num_patches, sample_ids) - real_B_pool, _ = self.netF(real_B_feat, self.opt.num_patches, sample_ids) - - - ## Relation Loss - self.loss_SRC, weight = self.calculate_R_loss(real_A_pool, fake_B_pool, epoch=self.train_epoch) - - - ## HDCE - if self.opt.lambda_HDCE > 0.0: - self.loss_HDCE = self.calculate_HDCE_loss(real_A_pool, fake_B_pool, weight) - else: - self.loss_HDCE, self.loss_HDCE_bd = 0.0, 0.0 - - self.loss_HDCE_Y = 0 - if self.opt.dce_idt and self.opt.lambda_HDCE > 0.0: - _, weight_idt = self.calculate_R_loss(real_B_pool, idt_B_pool, only_weight=True, epoch=self.train_epoch) - self.loss_HDCE_Y = self.calculate_HDCE_loss(real_B_pool, idt_B_pool, weight_idt) - loss_HDCE_both = (self.loss_HDCE + self.loss_HDCE_Y) * 0.5 - else: - loss_HDCE_both = self.loss_HDCE - - self.loss_G = self.loss_G_GAN + loss_HDCE_both + self.loss_SRC - return self.loss_G - - - def calculate_HDCE_loss(self, src, tgt, weight=None): - n_layers = len(self.nce_layers) - - feat_q_pool = tgt - feat_k_pool = src - - total_HDCE_loss = 0.0 - for f_q, f_k, crit, nce_layer, w in 
zip(feat_q_pool, feat_k_pool, self.criterionHDCE, self.nce_layers, weight): - if self.opt.no_Hneg: - w = None - loss = crit(f_q, f_k, w) * self.opt.lambda_HDCE - total_HDCE_loss += loss.mean() - - return total_HDCE_loss / n_layers - - - def calculate_R_loss(self, src, tgt, only_weight=False, epoch=None): - n_layers = len(self.nce_layers) - - feat_q_pool = tgt - feat_k_pool = src - - total_SRC_loss = 0.0 - weights=[] - for f_q, f_k, crit, nce_layer in zip(feat_q_pool, feat_k_pool, self.criterionR, self.nce_layers): - loss_SRC, weight = crit(f_q, f_k, only_weight, epoch) - total_SRC_loss += loss_SRC * self.opt.lambda_SRC - weights.append(weight) - return total_SRC_loss / n_layers, weights - - -#-------------------------------------------------------------------------------------------------------- - def calculate_Patchloss(self, src, tgt, num_patch=4): - - feat_org = self.netG(src, mode='encoder') - if self.opt.flip_equivariance and self.flipped_for_equivariance: - feat_org = torch.flip(feat_org, [3]) - - N,C,H,W = feat_org.size() - - ps = H//num_patch - lam = get_spa_lambda(self.alpha,size=(1,1,num_patch**2),device = feat_org.device) - feat_org_unfold = F.unfold(feat_org,kernel_size=(ps,ps),padding=0,stride=ps) - - rndperm = torch.randperm(feat_org_unfold.size(2)) - feat_prm = feat_org_unfold[:,:,rndperm] - feat_mix = lam*feat_org_unfold + (1-lam)*feat_prm - feat_mix = F.fold(feat_mix,output_size=(H,W),kernel_size=(ps,ps),padding=0,stride=ps) - - out_mix = self.netG(feat_mix,mode='decoder') - feat_mix_rec = self.netG(out_mix,mode='encoder') - - fake_feat = self.netG(tgt,mode='encoder') - - fake_feat_unfold = F.unfold(fake_feat,kernel_size=(ps,ps),padding=0,stride=ps) - fake_feat_prm = fake_feat_unfold[:,:,rndperm] - fake_feat_mix = lam*fake_feat_unfold + (1-lam)*fake_feat_prm - fake_feat_mix = F.fold(fake_feat_mix,output_size=(H,W),kernel_size=(ps,ps),padding=0,stride=ps) - - - PM_loss = torch.mean(torch.abs(fake_feat_mix - feat_mix_rec)) - - return 10*PM_loss - -#-------------------------------------------------------------------------------------------------------- \ No newline at end of file diff --git a/spaces/a-v-bely/spanish-task-generator/utilities_option_menu/frontend/dist/js/app.0d11cb9b.js b/spaces/a-v-bely/spanish-task-generator/utilities_option_menu/frontend/dist/js/app.0d11cb9b.js deleted file mode 100644 index e20534954620d7ca7e8e7a7d8639204fce247502..0000000000000000000000000000000000000000 --- a/spaces/a-v-bely/spanish-task-generator/utilities_option_menu/frontend/dist/js/app.0d11cb9b.js +++ /dev/null @@ -1,18 +0,0 @@ -(function(e){function t(t){for(var r,o,a=t[0],l=t[1],s=t[2],b=0,d=[];b 1MB) over Kafka requires special tuning and is generally discouraged. - -Depending on the type of relationships, there are different strategies for dealing with high cardinality. - -### 1:N Relationships - -When `N` is large, simply store the relationship as a reverse pointer on the `N` side, instead of an `N`-element array on the `1` side. In other words, instead of doing this - -``` -record MemberList { - members: array[UserUrn] -} -``` - -do this - -``` -record Membership { - group: GroupUrn -} -``` - -One drawback with this approach is that batch updating the member list becomes multiple DB operations and non-atomic. If the list is provided by an external metadata provider via [MCEs](../what/mxe.md), this also means that multiple MCEs will be required to update the list, instead of having one giant array in a single MCE. 
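To make the drawback just described concrete, here is a small, runnable sketch of the two write patterns. The `AspectStore` class, the `upsert` method, and the URN strings are illustrative stand-ins, not DataHub's actual storage API; the point is only that the forward-pointer model updates the whole list in one write, while the reverse-pointer model turns the same batch update into one write per member, with no single atomic operation covering them all.

```
# Toy model of the 1:N trade-off above; AspectStore is a hypothetical
# stand-in for the document store, not a real DataHub API.

class AspectStore:
    """One row per (entity URN, aspect name) pair."""

    def __init__(self):
        self.rows = {}

    def upsert(self, urn, aspect, value):
        # Each call models one independent (non-atomic) DB write.
        self.rows[(urn, aspect)] = value


store = AspectStore()
N = 10_000

# Forward pointer: a single atomic write, but the array grows with N.
store.upsert(
    "urn:li:group:eng", "memberList",
    {"members": [f"urn:li:user:{i}" for i in range(N)]},
)

# Reverse pointer: each row stays small, but the same batch update now
# takes N separate writes, and a failure midway leaves a partial update.
for i in range(N):
    store.upsert(f"urn:li:user:{i}", "membership", {"group": "urn:li:group:eng"})
```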
-
-### M:N Relationships
-
-When one side of the relation (`M` or `N`) has low cardinality, you can apply the same trick as in [1:N Relationships] by creating the array on the low-cardinality side. For example, assuming a user can only be part of a small number of groups but each group can have a large number of users, the following model will be more efficient than the reverse.
-
-```
-record Membership {
-  groups: array[GroupUrn]
-}
-```
-
-When both `M` and `N` are of high cardinality (e.g. millions of users, each belonging to millions of groups), the only way to store such relationships efficiently is by creating a new "Mapping Entity" with a single aspect like this
-
-```
-record UserGroupMap {
-  user: UserUrn
-  group: GroupUrn
-}
-```
-
-This means that the relationship can now only be created and updated at a single source-destination-pair granularity.
diff --git a/spaces/abhishek/diffuzers/Dockerfile b/spaces/abhishek/diffuzers/Dockerfile
deleted file mode 100644
index ad42fa472fdaebfb9415c13dd6b547f2c92afa69..0000000000000000000000000000000000000000
--- a/spaces/abhishek/diffuzers/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM abhishekkrthakur/diffuzers:latest
-RUN chown -R 1000:1000 /app
-
-CMD diffuzers run --port 7860
\ No newline at end of file
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/parallel/registry.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/parallel/registry.py
deleted file mode 100644
index a204a07fba10e614223f090d1a57cf9c4d74d4a1..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/parallel/registry.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from torch.nn.parallel import DataParallel, DistributedDataParallel
-
-from annotator.uniformer.mmcv.utils import Registry
-
-MODULE_WRAPPERS = Registry('module wrapper')
-MODULE_WRAPPERS.register_module(module=DataParallel)
-MODULE_WRAPPERS.register_module(module=DistributedDataParallel)
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/necks/yolo_neck.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/necks/yolo_neck.py
deleted file mode 100644
index c2f9b9ef3859796c284c16ad1a92fe41ecbed613..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/necks/yolo_neck.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright (c) 2019 Western Digital Corporation or its affiliates.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule
-
-from ..builder import NECKS
-
-
-class DetectionBlock(nn.Module):
-    """Detection block in YOLO neck.
-
-    Let out_channels = n; the DetectionBlock contains:
-    Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer.
-    The first 6 ConvLayers are formed the following way:
-    1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.
-    The Conv2D layer is 1x1x255.
-    Some blocks will have a branch after the fifth ConvLayer.
-    The number of input channels is arbitrary (in_channels).
-
-    Args:
-        in_channels (int): The number of input channels.
-        out_channels (int): The number of output channels.
-        conv_cfg (dict): Config dict for convolution layer. Default: None.
-        norm_cfg (dict): Dictionary to construct and config norm layer.
-            Default: dict(type='BN', requires_grad=True)
-        act_cfg (dict): Config dict for activation layer.
-            Default: dict(type='LeakyReLU', negative_slope=0.1).
- """ - - def __init__(self, - in_channels, - out_channels, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1)): - super(DetectionBlock, self).__init__() - double_out_channels = out_channels * 2 - - # shortcut - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg) - self.conv2 = ConvModule( - out_channels, double_out_channels, 3, padding=1, **cfg) - self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg) - self.conv4 = ConvModule( - out_channels, double_out_channels, 3, padding=1, **cfg) - self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg) - - def forward(self, x): - tmp = self.conv1(x) - tmp = self.conv2(tmp) - tmp = self.conv3(tmp) - tmp = self.conv4(tmp) - out = self.conv5(tmp) - return out - - -@NECKS.register_module() -class YOLOV3Neck(nn.Module): - """The neck of YOLOV3. - - It can be treated as a simplified version of FPN. It - will take the result from Darknet backbone and do some upsampling and - concatenation. It will finally output the detection result. - - Note: - The input feats should be from top to bottom. - i.e., from high-lvl to low-lvl - But YOLOV3Neck will process them in reversed order. - i.e., from bottom (high-lvl) to top (low-lvl) - - Args: - num_scales (int): The number of scales / stages. - in_channels (int): The number of input channels. - out_channels (int): The number of output channels. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - """ - - def __init__(self, - num_scales, - in_channels, - out_channels, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1)): - super(YOLOV3Neck, self).__init__() - assert (num_scales == len(in_channels) == len(out_channels)) - self.num_scales = num_scales - self.in_channels = in_channels - self.out_channels = out_channels - - # shortcut - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - - # To support arbitrary scales, the code looks awful, but it works. - # Better solution is welcomed. 
-        self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
-        for i in range(1, self.num_scales):
-            in_c, out_c = self.in_channels[i], self.out_channels[i]
-            self.add_module(f'conv{i}', ConvModule(in_c, out_c, 1, **cfg))
-            # in_c + out_c : High-lvl feats will be cat with low-lvl feats
-            self.add_module(f'detect{i+1}',
-                            DetectionBlock(in_c + out_c, out_c, **cfg))
-
-    def forward(self, feats):
-        assert len(feats) == self.num_scales
-
-        # processed from bottom (high-lvl) to top (low-lvl)
-        outs = []
-        out = self.detect1(feats[-1])
-        outs.append(out)
-
-        for i, x in enumerate(reversed(feats[:-1])):
-            conv = getattr(self, f'conv{i+1}')
-            tmp = conv(out)
-
-            # Cat with low-lvl feats
-            tmp = F.interpolate(tmp, scale_factor=2)
-            tmp = torch.cat((tmp, x), 1)
-
-            detect = getattr(self, f'detect{i+2}')
-            out = detect(tmp)
-            outs.append(out)
-
-        return tuple(outs)
-
-    def init_weights(self):
-        """Initialize the weights of module."""
-        # init is done in ConvModule
-        pass
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/parallel/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/parallel/__init__.py
deleted file mode 100644
index 2ed2c17ad357742e423beeaf4d35db03fe9af469..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/parallel/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .collate import collate
-from .data_container import DataContainer
-from .data_parallel import MMDataParallel
-from .distributed import MMDistributedDataParallel
-from .registry import MODULE_WRAPPERS
-from .scatter_gather import scatter, scatter_kwargs
-from .utils import is_module_wrapper
-
-__all__ = [
-    'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel',
-    'scatter', 'scatter_kwargs', 'is_module_wrapper', 'MODULE_WRAPPERS'
-]
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/utils.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/utils.py
deleted file mode 100644
index c5befb8e56ece50b5fecfd007b26f8a29124c0bd..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/utils.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import os
-import random
-import sys
-import time
-import warnings
-from getpass import getuser
-from socket import gethostname
-
-import numpy as np
-import torch
-
-import annotator.uniformer.mmcv as mmcv
-
-
-def get_host_info():
-    """Get hostname and username.
-
-    Return an empty string if an exception is raised, e.g. ``getpass.getuser()``
-    will lead to an error in a docker container
-    """
-    host = ''
-    try:
-        host = f'{getuser()}@{gethostname()}'
-    except Exception as e:
-        warnings.warn(f'Host or user not found: {str(e)}')
-    finally:
-        return host
-
-
-def get_time_str():
-    return time.strftime('%Y%m%d_%H%M%S', time.localtime())
-
-
-def obj_from_dict(info, parent=None, default_args=None):
-    """Initialize an object from dict.
-
-    The dict must contain the key "type", which indicates the object type; it
-    can be either a string or a type, such as "list" or ``list``. Remaining
-    fields are treated as the arguments for constructing the object.
-
-    Args:
-        info (dict): Object types and arguments.
-        parent (:class:`module`): Module which may contain the expected object
-            classes.
-        default_args (dict, optional): Default arguments for initializing the
-            object.
-
-    Returns:
-        any type: Object built from the dict.
-    """
-    assert isinstance(info, dict) and 'type' in info
-    assert isinstance(default_args, dict) or default_args is None
-    args = info.copy()
-    obj_type = args.pop('type')
-    if mmcv.is_str(obj_type):
-        if parent is not None:
-            obj_type = getattr(parent, obj_type)
-        else:
-            obj_type = sys.modules[obj_type]
-    elif not isinstance(obj_type, type):
-        raise TypeError('type must be a str or valid type, but '
-                        f'got {type(obj_type)}')
-    if default_args is not None:
-        for name, value in default_args.items():
-            args.setdefault(name, value)
-    return obj_type(**args)
-
-
-def set_random_seed(seed, deterministic=False, use_rank_shift=False):
-    """Set random seed.
-
-    Args:
-        seed (int): Seed to be used.
-        deterministic (bool): Whether to set the deterministic option for
-            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
-            to True and `torch.backends.cudnn.benchmark` to False.
-            Default: False.
-        use_rank_shift (bool): Whether to add the rank number to the random
-            seed to have different random seeds in different threads.
-            Default: False.
-    """
-    if use_rank_shift:
-        rank, _ = mmcv.runner.get_dist_info()
-        seed += rank
-    random.seed(seed)
-    np.random.seed(seed)
-    torch.manual_seed(seed)
-    torch.cuda.manual_seed(seed)
-    torch.cuda.manual_seed_all(seed)
-    os.environ['PYTHONHASHSEED'] = str(seed)
-    if deterministic:
-        torch.backends.cudnn.deterministic = True
-        torch.backends.cudnn.benchmark = False
diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/gl/gl_compat.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/gl/gl_compat.py
deleted file mode 100644
index 10e1af6893a05042c394de19e8a7589e82684bc2..0000000000000000000000000000000000000000
--- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/gl/gl_compat.py
+++ /dev/null
@@ -1,5949 +0,0 @@
-"""Wrapper for https://raw.githubusercontent.com/KhronosGroup/OpenGL-Registry/master/xml/gl.xml
-Generated by tools/gengl.py.
-Do not modify this file.
-""" - -from ctypes import * -from pyglet.gl.lib import link_GL as _link_function -from pyglet.gl.lib import c_ptrdiff_t - -class struct___GLsync(Structure): - __slots__ = [ - ] -struct___GLsync._fields_ = [ - ('_opaque_struct', c_int) -] - -# END OF gl.template - -# GL type definitions -GLenum = c_uint -GLboolean = c_ubyte -GLbitfield = c_uint -GLvoid = None -GLbyte = c_char -GLubyte = c_ubyte -GLshort = c_short -GLushort = c_ushort -GLint = c_int -GLuint = c_uint -GLclampx = c_uint -GLsizei = c_int -GLfloat = c_float -GLclampf = c_float -GLdouble = c_double -GLclampd = c_double -GLchar = c_char -GLintptr = c_ptrdiff_t -GLsizeiptr = c_ptrdiff_t -GLint64 = c_int64 -GLuint64 = c_uint64 -GLsync = POINTER(struct___GLsync) -GLDEBUGPROC = CFUNCTYPE(None, GLenum, GLenum, GLuint, GLenum, GLsizei, POINTER(GLchar), POINTER(GLvoid)) - -# GL enumerant (token) definitions -GL_FALSE = 0 -GL_POINTS = 0 -GL_ZERO = 0 -GL_NONE = 0 -GL_NO_ERROR = 0 -GL_TRUE = 1 -GL_LINES = 1 -GL_ONE = 1 -GL_CURRENT_BIT = 1 -GL_CLIENT_PIXEL_STORE_BIT = 1 -GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT = 1 -GL_MAP_READ_BIT = 1 -GL_CONTEXT_CORE_PROFILE_BIT = 1 -GL_SYNC_FLUSH_COMMANDS_BIT = 1 -GL_VERTEX_SHADER_BIT = 1 -GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT = 1 -GL_LINE_LOOP = 2 -GL_POINT_BIT = 2 -GL_CLIENT_VERTEX_ARRAY_BIT = 2 -GL_MAP_WRITE_BIT = 2 -GL_CONTEXT_COMPATIBILITY_PROFILE_BIT = 2 -GL_FRAGMENT_SHADER_BIT = 2 -GL_ELEMENT_ARRAY_BARRIER_BIT = 2 -GL_CONTEXT_FLAG_DEBUG_BIT = 2 -GL_LINE_STRIP = 3 -GL_TRIANGLES = 4 -GL_LINE_BIT = 4 -GL_MAP_INVALIDATE_RANGE_BIT = 4 -GL_GEOMETRY_SHADER_BIT = 4 -GL_UNIFORM_BARRIER_BIT = 4 -GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT = 4 -GL_TRIANGLE_STRIP = 5 -GL_TRIANGLE_FAN = 6 -GL_QUADS = 7 -GL_POLYGON_BIT = 8 -GL_QUAD_STRIP = 8 -GL_MAP_INVALIDATE_BUFFER_BIT = 8 -GL_TESS_CONTROL_SHADER_BIT = 8 -GL_TEXTURE_FETCH_BARRIER_BIT = 8 -GL_CONTEXT_FLAG_NO_ERROR_BIT = 8 -GL_POLYGON = 9 -GL_LINES_ADJACENCY = 10 -GL_LINE_STRIP_ADJACENCY = 11 -GL_TRIANGLES_ADJACENCY = 12 -GL_TRIANGLE_STRIP_ADJACENCY = 13 -GL_PATCHES = 14 -GL_POLYGON_STIPPLE_BIT = 16 -GL_MAP_FLUSH_EXPLICIT_BIT = 16 -GL_TESS_EVALUATION_SHADER_BIT = 16 -GL_PIXEL_MODE_BIT = 32 -GL_MAP_UNSYNCHRONIZED_BIT = 32 -GL_SHADER_IMAGE_ACCESS_BARRIER_BIT = 32 -GL_COMPUTE_SHADER_BIT = 32 -GL_LIGHTING_BIT = 64 -GL_COMMAND_BARRIER_BIT = 64 -GL_MAP_PERSISTENT_BIT = 64 -GL_FOG_BIT = 128 -GL_PIXEL_BUFFER_BARRIER_BIT = 128 -GL_MAP_COHERENT_BIT = 128 -GL_DEPTH_BUFFER_BIT = 256 -GL_ACCUM = 256 -GL_TEXTURE_UPDATE_BARRIER_BIT = 256 -GL_DYNAMIC_STORAGE_BIT = 256 -GL_LOAD = 257 -GL_RETURN = 258 -GL_MULT = 259 -GL_ADD = 260 -GL_NEVER = 512 -GL_ACCUM_BUFFER_BIT = 512 -GL_BUFFER_UPDATE_BARRIER_BIT = 512 -GL_CLIENT_STORAGE_BIT = 512 -GL_LESS = 513 -GL_EQUAL = 514 -GL_LEQUAL = 515 -GL_GREATER = 516 -GL_NOTEQUAL = 517 -GL_GEQUAL = 518 -GL_ALWAYS = 519 -GL_SRC_COLOR = 768 -GL_ONE_MINUS_SRC_COLOR = 769 -GL_SRC_ALPHA = 770 -GL_ONE_MINUS_SRC_ALPHA = 771 -GL_DST_ALPHA = 772 -GL_ONE_MINUS_DST_ALPHA = 773 -GL_DST_COLOR = 774 -GL_ONE_MINUS_DST_COLOR = 775 -GL_SRC_ALPHA_SATURATE = 776 -GL_STENCIL_BUFFER_BIT = 1024 -GL_FRONT_LEFT = 1024 -GL_FRAMEBUFFER_BARRIER_BIT = 1024 -GL_FRONT_RIGHT = 1025 -GL_BACK_LEFT = 1026 -GL_BACK_RIGHT = 1027 -GL_FRONT = 1028 -GL_BACK = 1029 -GL_LEFT = 1030 -GL_RIGHT = 1031 -GL_FRONT_AND_BACK = 1032 -GL_AUX0 = 1033 -GL_AUX1 = 1034 -GL_AUX2 = 1035 -GL_AUX3 = 1036 -GL_INVALID_ENUM = 1280 -GL_INVALID_VALUE = 1281 -GL_INVALID_OPERATION = 1282 -GL_STACK_OVERFLOW = 1283 -GL_STACK_UNDERFLOW = 1284 -GL_OUT_OF_MEMORY = 1285 -GL_INVALID_FRAMEBUFFER_OPERATION = 1286 
-GL_INVALID_FRAMEBUFFER_OPERATION_EXT = 1286 -GL_CONTEXT_LOST = 1287 -GL_2D = 1536 -GL_3D = 1537 -GL_3D_COLOR = 1538 -GL_3D_COLOR_TEXTURE = 1539 -GL_4D_COLOR_TEXTURE = 1540 -GL_PASS_THROUGH_TOKEN = 1792 -GL_POINT_TOKEN = 1793 -GL_LINE_TOKEN = 1794 -GL_POLYGON_TOKEN = 1795 -GL_BITMAP_TOKEN = 1796 -GL_DRAW_PIXEL_TOKEN = 1797 -GL_COPY_PIXEL_TOKEN = 1798 -GL_LINE_RESET_TOKEN = 1799 -GL_VIEWPORT_BIT = 2048 -GL_EXP = 2048 -GL_TRANSFORM_FEEDBACK_BARRIER_BIT = 2048 -GL_EXP2 = 2049 -GL_CW = 2304 -GL_CCW = 2305 -GL_COEFF = 2560 -GL_ORDER = 2561 -GL_DOMAIN = 2562 -GL_CURRENT_COLOR = 2816 -GL_CURRENT_INDEX = 2817 -GL_CURRENT_NORMAL = 2818 -GL_CURRENT_TEXTURE_COORDS = 2819 -GL_CURRENT_RASTER_COLOR = 2820 -GL_CURRENT_RASTER_INDEX = 2821 -GL_CURRENT_RASTER_TEXTURE_COORDS = 2822 -GL_CURRENT_RASTER_POSITION = 2823 -GL_CURRENT_RASTER_POSITION_VALID = 2824 -GL_CURRENT_RASTER_DISTANCE = 2825 -GL_POINT_SMOOTH = 2832 -GL_POINT_SIZE = 2833 -GL_POINT_SIZE_RANGE = 2834 -GL_SMOOTH_POINT_SIZE_RANGE = 2834 -GL_POINT_SIZE_GRANULARITY = 2835 -GL_SMOOTH_POINT_SIZE_GRANULARITY = 2835 -GL_LINE_SMOOTH = 2848 -GL_LINE_WIDTH = 2849 -GL_LINE_WIDTH_RANGE = 2850 -GL_SMOOTH_LINE_WIDTH_RANGE = 2850 -GL_LINE_WIDTH_GRANULARITY = 2851 -GL_SMOOTH_LINE_WIDTH_GRANULARITY = 2851 -GL_LINE_STIPPLE = 2852 -GL_LINE_STIPPLE_PATTERN = 2853 -GL_LINE_STIPPLE_REPEAT = 2854 -GL_LIST_MODE = 2864 -GL_MAX_LIST_NESTING = 2865 -GL_LIST_BASE = 2866 -GL_LIST_INDEX = 2867 -GL_POLYGON_MODE = 2880 -GL_POLYGON_SMOOTH = 2881 -GL_POLYGON_STIPPLE = 2882 -GL_EDGE_FLAG = 2883 -GL_CULL_FACE = 2884 -GL_CULL_FACE_MODE = 2885 -GL_FRONT_FACE = 2886 -GL_LIGHTING = 2896 -GL_LIGHT_MODEL_LOCAL_VIEWER = 2897 -GL_LIGHT_MODEL_TWO_SIDE = 2898 -GL_LIGHT_MODEL_AMBIENT = 2899 -GL_SHADE_MODEL = 2900 -GL_COLOR_MATERIAL_FACE = 2901 -GL_COLOR_MATERIAL_PARAMETER = 2902 -GL_COLOR_MATERIAL = 2903 -GL_FOG = 2912 -GL_FOG_INDEX = 2913 -GL_FOG_DENSITY = 2914 -GL_FOG_START = 2915 -GL_FOG_END = 2916 -GL_FOG_MODE = 2917 -GL_FOG_COLOR = 2918 -GL_DEPTH_RANGE = 2928 -GL_DEPTH_TEST = 2929 -GL_DEPTH_WRITEMASK = 2930 -GL_DEPTH_CLEAR_VALUE = 2931 -GL_DEPTH_FUNC = 2932 -GL_ACCUM_CLEAR_VALUE = 2944 -GL_STENCIL_TEST = 2960 -GL_STENCIL_CLEAR_VALUE = 2961 -GL_STENCIL_FUNC = 2962 -GL_STENCIL_VALUE_MASK = 2963 -GL_STENCIL_FAIL = 2964 -GL_STENCIL_PASS_DEPTH_FAIL = 2965 -GL_STENCIL_PASS_DEPTH_PASS = 2966 -GL_STENCIL_REF = 2967 -GL_STENCIL_WRITEMASK = 2968 -GL_MATRIX_MODE = 2976 -GL_NORMALIZE = 2977 -GL_VIEWPORT = 2978 -GL_MODELVIEW_STACK_DEPTH = 2979 -GL_PROJECTION_STACK_DEPTH = 2980 -GL_TEXTURE_STACK_DEPTH = 2981 -GL_MODELVIEW_MATRIX = 2982 -GL_PROJECTION_MATRIX = 2983 -GL_TEXTURE_MATRIX = 2984 -GL_ATTRIB_STACK_DEPTH = 2992 -GL_CLIENT_ATTRIB_STACK_DEPTH = 2993 -GL_ALPHA_TEST = 3008 -GL_ALPHA_TEST_FUNC = 3009 -GL_ALPHA_TEST_REF = 3010 -GL_DITHER = 3024 -GL_BLEND_DST = 3040 -GL_BLEND_SRC = 3041 -GL_BLEND = 3042 -GL_LOGIC_OP_MODE = 3056 -GL_LOGIC_OP = 3057 -GL_INDEX_LOGIC_OP = 3057 -GL_COLOR_LOGIC_OP = 3058 -GL_AUX_BUFFERS = 3072 -GL_DRAW_BUFFER = 3073 -GL_READ_BUFFER = 3074 -GL_SCISSOR_BOX = 3088 -GL_SCISSOR_TEST = 3089 -GL_INDEX_CLEAR_VALUE = 3104 -GL_INDEX_WRITEMASK = 3105 -GL_COLOR_CLEAR_VALUE = 3106 -GL_COLOR_WRITEMASK = 3107 -GL_INDEX_MODE = 3120 -GL_RGBA_MODE = 3121 -GL_DOUBLEBUFFER = 3122 -GL_STEREO = 3123 -GL_RENDER_MODE = 3136 -GL_PERSPECTIVE_CORRECTION_HINT = 3152 -GL_POINT_SMOOTH_HINT = 3153 -GL_LINE_SMOOTH_HINT = 3154 -GL_POLYGON_SMOOTH_HINT = 3155 -GL_FOG_HINT = 3156 -GL_TEXTURE_GEN_S = 3168 -GL_TEXTURE_GEN_T = 3169 -GL_TEXTURE_GEN_R = 3170 -GL_TEXTURE_GEN_Q = 3171 -GL_PIXEL_MAP_I_TO_I = 3184 
-GL_PIXEL_MAP_S_TO_S = 3185 -GL_PIXEL_MAP_I_TO_R = 3186 -GL_PIXEL_MAP_I_TO_G = 3187 -GL_PIXEL_MAP_I_TO_B = 3188 -GL_PIXEL_MAP_I_TO_A = 3189 -GL_PIXEL_MAP_R_TO_R = 3190 -GL_PIXEL_MAP_G_TO_G = 3191 -GL_PIXEL_MAP_B_TO_B = 3192 -GL_PIXEL_MAP_A_TO_A = 3193 -GL_PIXEL_MAP_I_TO_I_SIZE = 3248 -GL_PIXEL_MAP_S_TO_S_SIZE = 3249 -GL_PIXEL_MAP_I_TO_R_SIZE = 3250 -GL_PIXEL_MAP_I_TO_G_SIZE = 3251 -GL_PIXEL_MAP_I_TO_B_SIZE = 3252 -GL_PIXEL_MAP_I_TO_A_SIZE = 3253 -GL_PIXEL_MAP_R_TO_R_SIZE = 3254 -GL_PIXEL_MAP_G_TO_G_SIZE = 3255 -GL_PIXEL_MAP_B_TO_B_SIZE = 3256 -GL_PIXEL_MAP_A_TO_A_SIZE = 3257 -GL_UNPACK_SWAP_BYTES = 3312 -GL_UNPACK_LSB_FIRST = 3313 -GL_UNPACK_ROW_LENGTH = 3314 -GL_UNPACK_SKIP_ROWS = 3315 -GL_UNPACK_SKIP_PIXELS = 3316 -GL_UNPACK_ALIGNMENT = 3317 -GL_PACK_SWAP_BYTES = 3328 -GL_PACK_LSB_FIRST = 3329 -GL_PACK_ROW_LENGTH = 3330 -GL_PACK_SKIP_ROWS = 3331 -GL_PACK_SKIP_PIXELS = 3332 -GL_PACK_ALIGNMENT = 3333 -GL_MAP_COLOR = 3344 -GL_MAP_STENCIL = 3345 -GL_INDEX_SHIFT = 3346 -GL_INDEX_OFFSET = 3347 -GL_RED_SCALE = 3348 -GL_RED_BIAS = 3349 -GL_ZOOM_X = 3350 -GL_ZOOM_Y = 3351 -GL_GREEN_SCALE = 3352 -GL_GREEN_BIAS = 3353 -GL_BLUE_SCALE = 3354 -GL_BLUE_BIAS = 3355 -GL_ALPHA_SCALE = 3356 -GL_ALPHA_BIAS = 3357 -GL_DEPTH_SCALE = 3358 -GL_DEPTH_BIAS = 3359 -GL_MAX_EVAL_ORDER = 3376 -GL_MAX_LIGHTS = 3377 -GL_MAX_CLIP_PLANES = 3378 -GL_MAX_CLIP_DISTANCES = 3378 -GL_MAX_TEXTURE_SIZE = 3379 -GL_MAX_PIXEL_MAP_TABLE = 3380 -GL_MAX_ATTRIB_STACK_DEPTH = 3381 -GL_MAX_MODELVIEW_STACK_DEPTH = 3382 -GL_MAX_NAME_STACK_DEPTH = 3383 -GL_MAX_PROJECTION_STACK_DEPTH = 3384 -GL_MAX_TEXTURE_STACK_DEPTH = 3385 -GL_MAX_VIEWPORT_DIMS = 3386 -GL_MAX_CLIENT_ATTRIB_STACK_DEPTH = 3387 -GL_SUBPIXEL_BITS = 3408 -GL_INDEX_BITS = 3409 -GL_RED_BITS = 3410 -GL_GREEN_BITS = 3411 -GL_BLUE_BITS = 3412 -GL_ALPHA_BITS = 3413 -GL_DEPTH_BITS = 3414 -GL_STENCIL_BITS = 3415 -GL_ACCUM_RED_BITS = 3416 -GL_ACCUM_GREEN_BITS = 3417 -GL_ACCUM_BLUE_BITS = 3418 -GL_ACCUM_ALPHA_BITS = 3419 -GL_NAME_STACK_DEPTH = 3440 -GL_AUTO_NORMAL = 3456 -GL_MAP1_COLOR_4 = 3472 -GL_MAP1_INDEX = 3473 -GL_MAP1_NORMAL = 3474 -GL_MAP1_TEXTURE_COORD_1 = 3475 -GL_MAP1_TEXTURE_COORD_2 = 3476 -GL_MAP1_TEXTURE_COORD_3 = 3477 -GL_MAP1_TEXTURE_COORD_4 = 3478 -GL_MAP1_VERTEX_3 = 3479 -GL_MAP1_VERTEX_4 = 3480 -GL_MAP2_COLOR_4 = 3504 -GL_MAP2_INDEX = 3505 -GL_MAP2_NORMAL = 3506 -GL_MAP2_TEXTURE_COORD_1 = 3507 -GL_MAP2_TEXTURE_COORD_2 = 3508 -GL_MAP2_TEXTURE_COORD_3 = 3509 -GL_MAP2_TEXTURE_COORD_4 = 3510 -GL_MAP2_VERTEX_3 = 3511 -GL_MAP2_VERTEX_4 = 3512 -GL_MAP1_GRID_DOMAIN = 3536 -GL_MAP1_GRID_SEGMENTS = 3537 -GL_MAP2_GRID_DOMAIN = 3538 -GL_MAP2_GRID_SEGMENTS = 3539 -GL_TEXTURE_1D = 3552 -GL_TEXTURE_2D = 3553 -GL_FEEDBACK_BUFFER_POINTER = 3568 -GL_FEEDBACK_BUFFER_SIZE = 3569 -GL_FEEDBACK_BUFFER_TYPE = 3570 -GL_SELECTION_BUFFER_POINTER = 3571 -GL_SELECTION_BUFFER_SIZE = 3572 -GL_TEXTURE_WIDTH = 4096 -GL_TRANSFORM_BIT = 4096 -GL_ATOMIC_COUNTER_BARRIER_BIT = 4096 -GL_TEXTURE_HEIGHT = 4097 -GL_TEXTURE_COMPONENTS = 4099 -GL_TEXTURE_INTERNAL_FORMAT = 4099 -GL_TEXTURE_BORDER_COLOR = 4100 -GL_TEXTURE_BORDER = 4101 -GL_TEXTURE_TARGET = 4102 -GL_DONT_CARE = 4352 -GL_FASTEST = 4353 -GL_NICEST = 4354 -GL_AMBIENT = 4608 -GL_DIFFUSE = 4609 -GL_SPECULAR = 4610 -GL_POSITION = 4611 -GL_SPOT_DIRECTION = 4612 -GL_SPOT_EXPONENT = 4613 -GL_SPOT_CUTOFF = 4614 -GL_CONSTANT_ATTENUATION = 4615 -GL_LINEAR_ATTENUATION = 4616 -GL_QUADRATIC_ATTENUATION = 4617 -GL_COMPILE = 4864 -GL_COMPILE_AND_EXECUTE = 4865 -GL_BYTE = 5120 -GL_UNSIGNED_BYTE = 5121 -GL_SHORT = 5122 -GL_UNSIGNED_SHORT = 5123 -GL_INT = 5124 
-GL_UNSIGNED_INT = 5125 -GL_FLOAT = 5126 -GL_2_BYTES = 5127 -GL_3_BYTES = 5128 -GL_4_BYTES = 5129 -GL_DOUBLE = 5130 -GL_HALF_FLOAT = 5131 -GL_FIXED = 5132 -GL_CLEAR = 5376 -GL_AND = 5377 -GL_AND_REVERSE = 5378 -GL_COPY = 5379 -GL_AND_INVERTED = 5380 -GL_NOOP = 5381 -GL_XOR = 5382 -GL_OR = 5383 -GL_NOR = 5384 -GL_EQUIV = 5385 -GL_INVERT = 5386 -GL_OR_REVERSE = 5387 -GL_COPY_INVERTED = 5388 -GL_OR_INVERTED = 5389 -GL_NAND = 5390 -GL_SET = 5391 -GL_EMISSION = 5632 -GL_SHININESS = 5633 -GL_AMBIENT_AND_DIFFUSE = 5634 -GL_COLOR_INDEXES = 5635 -GL_MODELVIEW = 5888 -GL_PROJECTION = 5889 -GL_TEXTURE = 5890 -GL_COLOR = 6144 -GL_DEPTH = 6145 -GL_STENCIL = 6146 -GL_COLOR_INDEX = 6400 -GL_STENCIL_INDEX = 6401 -GL_DEPTH_COMPONENT = 6402 -GL_RED = 6403 -GL_GREEN = 6404 -GL_BLUE = 6405 -GL_ALPHA = 6406 -GL_RGB = 6407 -GL_RGBA = 6408 -GL_LUMINANCE = 6409 -GL_LUMINANCE_ALPHA = 6410 -GL_BITMAP = 6656 -GL_POINT = 6912 -GL_LINE = 6913 -GL_FILL = 6914 -GL_RENDER = 7168 -GL_FEEDBACK = 7169 -GL_SELECT = 7170 -GL_FLAT = 7424 -GL_SMOOTH = 7425 -GL_KEEP = 7680 -GL_REPLACE = 7681 -GL_INCR = 7682 -GL_DECR = 7683 -GL_VENDOR = 7936 -GL_RENDERER = 7937 -GL_VERSION = 7938 -GL_EXTENSIONS = 7939 -GL_ENABLE_BIT = 8192 -GL_S = 8192 -GL_SHADER_STORAGE_BARRIER_BIT = 8192 -GL_T = 8193 -GL_R = 8194 -GL_Q = 8195 -GL_MODULATE = 8448 -GL_DECAL = 8449 -GL_TEXTURE_ENV_MODE = 8704 -GL_TEXTURE_ENV_COLOR = 8705 -GL_TEXTURE_ENV = 8960 -GL_EYE_LINEAR = 9216 -GL_OBJECT_LINEAR = 9217 -GL_SPHERE_MAP = 9218 -GL_TEXTURE_GEN_MODE = 9472 -GL_OBJECT_PLANE = 9473 -GL_EYE_PLANE = 9474 -GL_NEAREST = 9728 -GL_LINEAR = 9729 -GL_NEAREST_MIPMAP_NEAREST = 9984 -GL_LINEAR_MIPMAP_NEAREST = 9985 -GL_NEAREST_MIPMAP_LINEAR = 9986 -GL_LINEAR_MIPMAP_LINEAR = 9987 -GL_TEXTURE_MAG_FILTER = 10240 -GL_TEXTURE_MIN_FILTER = 10241 -GL_TEXTURE_WRAP_S = 10242 -GL_TEXTURE_WRAP_T = 10243 -GL_CLAMP = 10496 -GL_REPEAT = 10497 -GL_POLYGON_OFFSET_UNITS = 10752 -GL_POLYGON_OFFSET_POINT = 10753 -GL_POLYGON_OFFSET_LINE = 10754 -GL_R3_G3_B2 = 10768 -GL_V2F = 10784 -GL_V3F = 10785 -GL_C4UB_V2F = 10786 -GL_C4UB_V3F = 10787 -GL_C3F_V3F = 10788 -GL_N3F_V3F = 10789 -GL_C4F_N3F_V3F = 10790 -GL_T2F_V3F = 10791 -GL_T4F_V4F = 10792 -GL_T2F_C4UB_V3F = 10793 -GL_T2F_C3F_V3F = 10794 -GL_T2F_N3F_V3F = 10795 -GL_T2F_C4F_N3F_V3F = 10796 -GL_T4F_C4F_N3F_V4F = 10797 -GL_CLIP_PLANE0 = 12288 -GL_CLIP_DISTANCE0 = 12288 -GL_CLIP_PLANE1 = 12289 -GL_CLIP_DISTANCE1 = 12289 -GL_CLIP_PLANE2 = 12290 -GL_CLIP_DISTANCE2 = 12290 -GL_CLIP_PLANE3 = 12291 -GL_CLIP_DISTANCE3 = 12291 -GL_CLIP_PLANE4 = 12292 -GL_CLIP_DISTANCE4 = 12292 -GL_CLIP_PLANE5 = 12293 -GL_CLIP_DISTANCE5 = 12293 -GL_CLIP_DISTANCE6 = 12294 -GL_CLIP_DISTANCE7 = 12295 -GL_COLOR_BUFFER_BIT = 16384 -GL_LIGHT0 = 16384 -GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT = 16384 -GL_LIGHT1 = 16385 -GL_LIGHT2 = 16386 -GL_LIGHT3 = 16387 -GL_LIGHT4 = 16388 -GL_LIGHT5 = 16389 -GL_LIGHT6 = 16390 -GL_LIGHT7 = 16391 -GL_HINT_BIT = 32768 -GL_QUERY_BUFFER_BARRIER_BIT = 32768 -GL_CONSTANT_COLOR = 32769 -GL_ONE_MINUS_CONSTANT_COLOR = 32770 -GL_CONSTANT_ALPHA = 32771 -GL_ONE_MINUS_CONSTANT_ALPHA = 32772 -GL_BLEND_COLOR = 32773 -GL_FUNC_ADD = 32774 -GL_MIN = 32775 -GL_MAX = 32776 -GL_BLEND_EQUATION = 32777 -GL_BLEND_EQUATION_RGB = 32777 -GL_FUNC_SUBTRACT = 32778 -GL_FUNC_REVERSE_SUBTRACT = 32779 -GL_CONVOLUTION_1D = 32784 -GL_CONVOLUTION_2D = 32785 -GL_SEPARABLE_2D = 32786 -GL_HISTOGRAM = 32804 -GL_PROXY_HISTOGRAM = 32805 -GL_MINMAX = 32814 -GL_UNSIGNED_BYTE_3_3_2 = 32818 -GL_UNSIGNED_SHORT_4_4_4_4 = 32819 -GL_UNSIGNED_SHORT_5_5_5_1 = 32820 -GL_UNSIGNED_INT_8_8_8_8 = 32821 
-GL_UNSIGNED_INT_10_10_10_2 = 32822 -GL_POLYGON_OFFSET_FILL = 32823 -GL_POLYGON_OFFSET_FACTOR = 32824 -GL_RESCALE_NORMAL = 32826 -GL_ALPHA4 = 32827 -GL_ALPHA8 = 32828 -GL_ALPHA12 = 32829 -GL_ALPHA16 = 32830 -GL_LUMINANCE4 = 32831 -GL_LUMINANCE8 = 32832 -GL_LUMINANCE12 = 32833 -GL_LUMINANCE16 = 32834 -GL_LUMINANCE4_ALPHA4 = 32835 -GL_LUMINANCE6_ALPHA2 = 32836 -GL_LUMINANCE8_ALPHA8 = 32837 -GL_LUMINANCE12_ALPHA4 = 32838 -GL_LUMINANCE12_ALPHA12 = 32839 -GL_LUMINANCE16_ALPHA16 = 32840 -GL_INTENSITY = 32841 -GL_INTENSITY4 = 32842 -GL_INTENSITY8 = 32843 -GL_INTENSITY12 = 32844 -GL_INTENSITY16 = 32845 -GL_RGB4 = 32847 -GL_RGB5 = 32848 -GL_RGB8 = 32849 -GL_RGB10 = 32850 -GL_RGB12 = 32851 -GL_RGB16 = 32852 -GL_RGBA2 = 32853 -GL_RGBA4 = 32854 -GL_RGB5_A1 = 32855 -GL_RGBA8 = 32856 -GL_RGB10_A2 = 32857 -GL_RGBA12 = 32858 -GL_RGBA16 = 32859 -GL_TEXTURE_RED_SIZE = 32860 -GL_TEXTURE_GREEN_SIZE = 32861 -GL_TEXTURE_BLUE_SIZE = 32862 -GL_TEXTURE_ALPHA_SIZE = 32863 -GL_TEXTURE_LUMINANCE_SIZE = 32864 -GL_TEXTURE_INTENSITY_SIZE = 32865 -GL_PROXY_TEXTURE_1D = 32867 -GL_PROXY_TEXTURE_2D = 32868 -GL_TEXTURE_PRIORITY = 32870 -GL_TEXTURE_RESIDENT = 32871 -GL_TEXTURE_BINDING_1D = 32872 -GL_TEXTURE_BINDING_2D = 32873 -GL_TEXTURE_BINDING_3D = 32874 -GL_PACK_SKIP_IMAGES = 32875 -GL_PACK_IMAGE_HEIGHT = 32876 -GL_UNPACK_SKIP_IMAGES = 32877 -GL_UNPACK_IMAGE_HEIGHT = 32878 -GL_TEXTURE_3D = 32879 -GL_PROXY_TEXTURE_3D = 32880 -GL_TEXTURE_DEPTH = 32881 -GL_TEXTURE_WRAP_R = 32882 -GL_MAX_3D_TEXTURE_SIZE = 32883 -GL_VERTEX_ARRAY = 32884 -GL_NORMAL_ARRAY = 32885 -GL_COLOR_ARRAY = 32886 -GL_INDEX_ARRAY = 32887 -GL_TEXTURE_COORD_ARRAY = 32888 -GL_EDGE_FLAG_ARRAY = 32889 -GL_VERTEX_ARRAY_SIZE = 32890 -GL_VERTEX_ARRAY_TYPE = 32891 -GL_VERTEX_ARRAY_STRIDE = 32892 -GL_NORMAL_ARRAY_TYPE = 32894 -GL_NORMAL_ARRAY_STRIDE = 32895 -GL_COLOR_ARRAY_SIZE = 32897 -GL_COLOR_ARRAY_TYPE = 32898 -GL_COLOR_ARRAY_STRIDE = 32899 -GL_INDEX_ARRAY_TYPE = 32901 -GL_INDEX_ARRAY_STRIDE = 32902 -GL_TEXTURE_COORD_ARRAY_SIZE = 32904 -GL_TEXTURE_COORD_ARRAY_TYPE = 32905 -GL_TEXTURE_COORD_ARRAY_STRIDE = 32906 -GL_EDGE_FLAG_ARRAY_STRIDE = 32908 -GL_VERTEX_ARRAY_POINTER = 32910 -GL_NORMAL_ARRAY_POINTER = 32911 -GL_COLOR_ARRAY_POINTER = 32912 -GL_INDEX_ARRAY_POINTER = 32913 -GL_TEXTURE_COORD_ARRAY_POINTER = 32914 -GL_EDGE_FLAG_ARRAY_POINTER = 32915 -GL_MULTISAMPLE = 32925 -GL_MULTISAMPLE_ARB = 32925 -GL_SAMPLE_ALPHA_TO_COVERAGE = 32926 -GL_SAMPLE_ALPHA_TO_COVERAGE_ARB = 32926 -GL_SAMPLE_ALPHA_TO_ONE = 32927 -GL_SAMPLE_ALPHA_TO_ONE_ARB = 32927 -GL_SAMPLE_COVERAGE = 32928 -GL_SAMPLE_COVERAGE_ARB = 32928 -GL_SAMPLE_BUFFERS = 32936 -GL_SAMPLE_BUFFERS_ARB = 32936 -GL_SAMPLES = 32937 -GL_SAMPLES_ARB = 32937 -GL_SAMPLE_COVERAGE_VALUE = 32938 -GL_SAMPLE_COVERAGE_VALUE_ARB = 32938 -GL_SAMPLE_COVERAGE_INVERT = 32939 -GL_SAMPLE_COVERAGE_INVERT_ARB = 32939 -GL_BLEND_DST_RGB = 32968 -GL_BLEND_SRC_RGB = 32969 -GL_BLEND_DST_ALPHA = 32970 -GL_BLEND_SRC_ALPHA = 32971 -GL_COLOR_TABLE = 32976 -GL_POST_CONVOLUTION_COLOR_TABLE = 32977 -GL_POST_COLOR_MATRIX_COLOR_TABLE = 32978 -GL_PROXY_COLOR_TABLE = 32979 -GL_PROXY_POST_CONVOLUTION_COLOR_TABLE = 32980 -GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE = 32981 -GL_BGR = 32992 -GL_BGRA = 32993 -GL_MAX_ELEMENTS_VERTICES = 33000 -GL_MAX_ELEMENTS_INDICES = 33001 -GL_PARAMETER_BUFFER = 33006 -GL_PARAMETER_BUFFER_BINDING = 33007 -GL_POINT_SIZE_MIN = 33062 -GL_POINT_SIZE_MAX = 33063 -GL_POINT_FADE_THRESHOLD_SIZE = 33064 -GL_POINT_DISTANCE_ATTENUATION = 33065 -GL_CLAMP_TO_BORDER = 33069 -GL_CLAMP_TO_EDGE = 33071 -GL_TEXTURE_MIN_LOD = 33082 
-GL_TEXTURE_MAX_LOD = 33083 -GL_TEXTURE_BASE_LEVEL = 33084 -GL_TEXTURE_MAX_LEVEL = 33085 -GL_GENERATE_MIPMAP = 33169 -GL_GENERATE_MIPMAP_HINT = 33170 -GL_DEPTH_COMPONENT16 = 33189 -GL_DEPTH_COMPONENT24 = 33190 -GL_DEPTH_COMPONENT32 = 33191 -GL_LIGHT_MODEL_COLOR_CONTROL = 33272 -GL_SINGLE_COLOR = 33273 -GL_SEPARATE_SPECULAR_COLOR = 33274 -GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING = 33296 -GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE = 33297 -GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE = 33298 -GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE = 33299 -GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE = 33300 -GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE = 33301 -GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE = 33302 -GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE = 33303 -GL_FRAMEBUFFER_DEFAULT = 33304 -GL_FRAMEBUFFER_UNDEFINED = 33305 -GL_DEPTH_STENCIL_ATTACHMENT = 33306 -GL_MAJOR_VERSION = 33307 -GL_MINOR_VERSION = 33308 -GL_NUM_EXTENSIONS = 33309 -GL_CONTEXT_FLAGS = 33310 -GL_BUFFER_IMMUTABLE_STORAGE = 33311 -GL_BUFFER_STORAGE_FLAGS = 33312 -GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED = 33313 -GL_INDEX = 33314 -GL_COMPRESSED_RED = 33317 -GL_COMPRESSED_RG = 33318 -GL_RG = 33319 -GL_RG_INTEGER = 33320 -GL_R8 = 33321 -GL_R16 = 33322 -GL_RG8 = 33323 -GL_RG16 = 33324 -GL_R16F = 33325 -GL_R32F = 33326 -GL_RG16F = 33327 -GL_RG32F = 33328 -GL_R8I = 33329 -GL_R8UI = 33330 -GL_R16I = 33331 -GL_R16UI = 33332 -GL_R32I = 33333 -GL_R32UI = 33334 -GL_RG8I = 33335 -GL_RG8UI = 33336 -GL_RG16I = 33337 -GL_RG16UI = 33338 -GL_RG32I = 33339 -GL_RG32UI = 33340 -GL_DEBUG_OUTPUT_SYNCHRONOUS = 33346 -GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH = 33347 -GL_DEBUG_CALLBACK_FUNCTION = 33348 -GL_DEBUG_CALLBACK_USER_PARAM = 33349 -GL_DEBUG_SOURCE_API = 33350 -GL_DEBUG_SOURCE_WINDOW_SYSTEM = 33351 -GL_DEBUG_SOURCE_SHADER_COMPILER = 33352 -GL_DEBUG_SOURCE_THIRD_PARTY = 33353 -GL_DEBUG_SOURCE_APPLICATION = 33354 -GL_DEBUG_SOURCE_OTHER = 33355 -GL_DEBUG_TYPE_ERROR = 33356 -GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR = 33357 -GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR = 33358 -GL_DEBUG_TYPE_PORTABILITY = 33359 -GL_DEBUG_TYPE_PERFORMANCE = 33360 -GL_DEBUG_TYPE_OTHER = 33361 -GL_LOSE_CONTEXT_ON_RESET = 33362 -GL_GUILTY_CONTEXT_RESET = 33363 -GL_INNOCENT_CONTEXT_RESET = 33364 -GL_UNKNOWN_CONTEXT_RESET = 33365 -GL_RESET_NOTIFICATION_STRATEGY = 33366 -GL_PROGRAM_BINARY_RETRIEVABLE_HINT = 33367 -GL_PROGRAM_SEPARABLE = 33368 -GL_ACTIVE_PROGRAM = 33369 -GL_PROGRAM_PIPELINE_BINDING = 33370 -GL_MAX_VIEWPORTS = 33371 -GL_VIEWPORT_SUBPIXEL_BITS = 33372 -GL_VIEWPORT_BOUNDS_RANGE = 33373 -GL_LAYER_PROVOKING_VERTEX = 33374 -GL_VIEWPORT_INDEX_PROVOKING_VERTEX = 33375 -GL_UNDEFINED_VERTEX = 33376 -GL_NO_RESET_NOTIFICATION = 33377 -GL_MAX_COMPUTE_SHARED_MEMORY_SIZE = 33378 -GL_MAX_COMPUTE_UNIFORM_COMPONENTS = 33379 -GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS = 33380 -GL_MAX_COMPUTE_ATOMIC_COUNTERS = 33381 -GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS = 33382 -GL_COMPUTE_WORK_GROUP_SIZE = 33383 -GL_DEBUG_TYPE_MARKER = 33384 -GL_DEBUG_TYPE_PUSH_GROUP = 33385 -GL_DEBUG_TYPE_POP_GROUP = 33386 -GL_DEBUG_SEVERITY_NOTIFICATION = 33387 -GL_MAX_DEBUG_GROUP_STACK_DEPTH = 33388 -GL_DEBUG_GROUP_STACK_DEPTH = 33389 -GL_MAX_UNIFORM_LOCATIONS = 33390 -GL_INTERNALFORMAT_SUPPORTED = 33391 -GL_INTERNALFORMAT_PREFERRED = 33392 -GL_INTERNALFORMAT_RED_SIZE = 33393 -GL_INTERNALFORMAT_GREEN_SIZE = 33394 -GL_INTERNALFORMAT_BLUE_SIZE = 33395 -GL_INTERNALFORMAT_ALPHA_SIZE = 33396 -GL_INTERNALFORMAT_DEPTH_SIZE = 33397 -GL_INTERNALFORMAT_STENCIL_SIZE = 33398 -GL_INTERNALFORMAT_SHARED_SIZE = 33399 -GL_INTERNALFORMAT_RED_TYPE = 33400 -GL_INTERNALFORMAT_GREEN_TYPE = 33401 
-GL_INTERNALFORMAT_BLUE_TYPE = 33402 -GL_INTERNALFORMAT_ALPHA_TYPE = 33403 -GL_INTERNALFORMAT_DEPTH_TYPE = 33404 -GL_INTERNALFORMAT_STENCIL_TYPE = 33405 -GL_MAX_WIDTH = 33406 -GL_MAX_HEIGHT = 33407 -GL_MAX_DEPTH = 33408 -GL_MAX_LAYERS = 33409 -GL_MAX_COMBINED_DIMENSIONS = 33410 -GL_COLOR_COMPONENTS = 33411 -GL_DEPTH_COMPONENTS = 33412 -GL_STENCIL_COMPONENTS = 33413 -GL_COLOR_RENDERABLE = 33414 -GL_DEPTH_RENDERABLE = 33415 -GL_STENCIL_RENDERABLE = 33416 -GL_FRAMEBUFFER_RENDERABLE = 33417 -GL_FRAMEBUFFER_RENDERABLE_LAYERED = 33418 -GL_FRAMEBUFFER_BLEND = 33419 -GL_READ_PIXELS = 33420 -GL_READ_PIXELS_FORMAT = 33421 -GL_READ_PIXELS_TYPE = 33422 -GL_TEXTURE_IMAGE_FORMAT = 33423 -GL_TEXTURE_IMAGE_TYPE = 33424 -GL_GET_TEXTURE_IMAGE_FORMAT = 33425 -GL_GET_TEXTURE_IMAGE_TYPE = 33426 -GL_MIPMAP = 33427 -GL_MANUAL_GENERATE_MIPMAP = 33428 -GL_AUTO_GENERATE_MIPMAP = 33429 -GL_COLOR_ENCODING = 33430 -GL_SRGB_READ = 33431 -GL_SRGB_WRITE = 33432 -GL_FILTER = 33434 -GL_VERTEX_TEXTURE = 33435 -GL_TESS_CONTROL_TEXTURE = 33436 -GL_TESS_EVALUATION_TEXTURE = 33437 -GL_GEOMETRY_TEXTURE = 33438 -GL_FRAGMENT_TEXTURE = 33439 -GL_COMPUTE_TEXTURE = 33440 -GL_TEXTURE_SHADOW = 33441 -GL_TEXTURE_GATHER = 33442 -GL_TEXTURE_GATHER_SHADOW = 33443 -GL_SHADER_IMAGE_LOAD = 33444 -GL_SHADER_IMAGE_STORE = 33445 -GL_SHADER_IMAGE_ATOMIC = 33446 -GL_IMAGE_TEXEL_SIZE = 33447 -GL_IMAGE_COMPATIBILITY_CLASS = 33448 -GL_IMAGE_PIXEL_FORMAT = 33449 -GL_IMAGE_PIXEL_TYPE = 33450 -GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST = 33452 -GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST = 33453 -GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE = 33454 -GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE = 33455 -GL_TEXTURE_COMPRESSED_BLOCK_WIDTH = 33457 -GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT = 33458 -GL_TEXTURE_COMPRESSED_BLOCK_SIZE = 33459 -GL_CLEAR_BUFFER = 33460 -GL_TEXTURE_VIEW = 33461 -GL_VIEW_COMPATIBILITY_CLASS = 33462 -GL_FULL_SUPPORT = 33463 -GL_CAVEAT_SUPPORT = 33464 -GL_IMAGE_CLASS_4_X_32 = 33465 -GL_IMAGE_CLASS_2_X_32 = 33466 -GL_IMAGE_CLASS_1_X_32 = 33467 -GL_IMAGE_CLASS_4_X_16 = 33468 -GL_IMAGE_CLASS_2_X_16 = 33469 -GL_IMAGE_CLASS_1_X_16 = 33470 -GL_IMAGE_CLASS_4_X_8 = 33471 -GL_IMAGE_CLASS_2_X_8 = 33472 -GL_IMAGE_CLASS_1_X_8 = 33473 -GL_IMAGE_CLASS_11_11_10 = 33474 -GL_IMAGE_CLASS_10_10_10_2 = 33475 -GL_VIEW_CLASS_128_BITS = 33476 -GL_VIEW_CLASS_96_BITS = 33477 -GL_VIEW_CLASS_64_BITS = 33478 -GL_VIEW_CLASS_48_BITS = 33479 -GL_VIEW_CLASS_32_BITS = 33480 -GL_VIEW_CLASS_24_BITS = 33481 -GL_VIEW_CLASS_16_BITS = 33482 -GL_VIEW_CLASS_8_BITS = 33483 -GL_VIEW_CLASS_S3TC_DXT1_RGB = 33484 -GL_VIEW_CLASS_S3TC_DXT1_RGBA = 33485 -GL_VIEW_CLASS_S3TC_DXT3_RGBA = 33486 -GL_VIEW_CLASS_S3TC_DXT5_RGBA = 33487 -GL_VIEW_CLASS_RGTC1_RED = 33488 -GL_VIEW_CLASS_RGTC2_RG = 33489 -GL_VIEW_CLASS_BPTC_UNORM = 33490 -GL_VIEW_CLASS_BPTC_FLOAT = 33491 -GL_VERTEX_ATTRIB_BINDING = 33492 -GL_VERTEX_ATTRIB_RELATIVE_OFFSET = 33493 -GL_VERTEX_BINDING_DIVISOR = 33494 -GL_VERTEX_BINDING_OFFSET = 33495 -GL_VERTEX_BINDING_STRIDE = 33496 -GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET = 33497 -GL_MAX_VERTEX_ATTRIB_BINDINGS = 33498 -GL_TEXTURE_VIEW_MIN_LEVEL = 33499 -GL_TEXTURE_VIEW_NUM_LEVELS = 33500 -GL_TEXTURE_VIEW_MIN_LAYER = 33501 -GL_TEXTURE_VIEW_NUM_LAYERS = 33502 -GL_TEXTURE_IMMUTABLE_LEVELS = 33503 -GL_BUFFER = 33504 -GL_SHADER = 33505 -GL_PROGRAM = 33506 -GL_QUERY = 33507 -GL_PROGRAM_PIPELINE = 33508 -GL_MAX_VERTEX_ATTRIB_STRIDE = 33509 -GL_SAMPLER = 33510 -GL_DISPLAY_LIST = 33511 -GL_MAX_LABEL_LENGTH = 33512 -GL_NUM_SHADING_LANGUAGE_VERSIONS = 33513 -GL_QUERY_TARGET = 33514 -GL_TRANSFORM_FEEDBACK_OVERFLOW = 
33516 -GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW = 33517 -GL_VERTICES_SUBMITTED = 33518 -GL_PRIMITIVES_SUBMITTED = 33519 -GL_VERTEX_SHADER_INVOCATIONS = 33520 -GL_TESS_CONTROL_SHADER_PATCHES = 33521 -GL_TESS_EVALUATION_SHADER_INVOCATIONS = 33522 -GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED = 33523 -GL_FRAGMENT_SHADER_INVOCATIONS = 33524 -GL_COMPUTE_SHADER_INVOCATIONS = 33525 -GL_CLIPPING_INPUT_PRIMITIVES = 33526 -GL_CLIPPING_OUTPUT_PRIMITIVES = 33527 -GL_MAX_CULL_DISTANCES = 33529 -GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES = 33530 -GL_CONTEXT_RELEASE_BEHAVIOR = 33531 -GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH = 33532 -GL_UNSIGNED_BYTE_2_3_3_REV = 33634 -GL_UNSIGNED_SHORT_5_6_5 = 33635 -GL_UNSIGNED_SHORT_5_6_5_REV = 33636 -GL_UNSIGNED_SHORT_4_4_4_4_REV = 33637 -GL_UNSIGNED_SHORT_1_5_5_5_REV = 33638 -GL_UNSIGNED_INT_8_8_8_8_REV = 33639 -GL_UNSIGNED_INT_2_10_10_10_REV = 33640 -GL_MIRRORED_REPEAT = 33648 -GL_COMPRESSED_RGB_S3TC_DXT1_EXT = 33776 -GL_COMPRESSED_RGBA_S3TC_DXT1_EXT = 33777 -GL_COMPRESSED_RGBA_S3TC_DXT3_EXT = 33778 -GL_COMPRESSED_RGBA_S3TC_DXT5_EXT = 33779 -GL_FOG_COORDINATE_SOURCE = 33872 -GL_FOG_COORD_SRC = 33872 -GL_FOG_COORDINATE = 33873 -GL_FOG_COORD = 33873 -GL_FRAGMENT_DEPTH = 33874 -GL_CURRENT_FOG_COORDINATE = 33875 -GL_CURRENT_FOG_COORD = 33875 -GL_FOG_COORDINATE_ARRAY_TYPE = 33876 -GL_FOG_COORD_ARRAY_TYPE = 33876 -GL_FOG_COORDINATE_ARRAY_STRIDE = 33877 -GL_FOG_COORD_ARRAY_STRIDE = 33877 -GL_FOG_COORDINATE_ARRAY_POINTER = 33878 -GL_FOG_COORD_ARRAY_POINTER = 33878 -GL_FOG_COORDINATE_ARRAY = 33879 -GL_FOG_COORD_ARRAY = 33879 -GL_COLOR_SUM = 33880 -GL_CURRENT_SECONDARY_COLOR = 33881 -GL_SECONDARY_COLOR_ARRAY_SIZE = 33882 -GL_SECONDARY_COLOR_ARRAY_TYPE = 33883 -GL_SECONDARY_COLOR_ARRAY_STRIDE = 33884 -GL_SECONDARY_COLOR_ARRAY_POINTER = 33885 -GL_SECONDARY_COLOR_ARRAY = 33886 -GL_CURRENT_RASTER_SECONDARY_COLOR = 33887 -GL_ALIASED_POINT_SIZE_RANGE = 33901 -GL_ALIASED_LINE_WIDTH_RANGE = 33902 -GL_TEXTURE0 = 33984 -GL_TEXTURE1 = 33985 -GL_TEXTURE2 = 33986 -GL_TEXTURE3 = 33987 -GL_TEXTURE4 = 33988 -GL_TEXTURE5 = 33989 -GL_TEXTURE6 = 33990 -GL_TEXTURE7 = 33991 -GL_TEXTURE8 = 33992 -GL_TEXTURE9 = 33993 -GL_TEXTURE10 = 33994 -GL_TEXTURE11 = 33995 -GL_TEXTURE12 = 33996 -GL_TEXTURE13 = 33997 -GL_TEXTURE14 = 33998 -GL_TEXTURE15 = 33999 -GL_TEXTURE16 = 34000 -GL_TEXTURE17 = 34001 -GL_TEXTURE18 = 34002 -GL_TEXTURE19 = 34003 -GL_TEXTURE20 = 34004 -GL_TEXTURE21 = 34005 -GL_TEXTURE22 = 34006 -GL_TEXTURE23 = 34007 -GL_TEXTURE24 = 34008 -GL_TEXTURE25 = 34009 -GL_TEXTURE26 = 34010 -GL_TEXTURE27 = 34011 -GL_TEXTURE28 = 34012 -GL_TEXTURE29 = 34013 -GL_TEXTURE30 = 34014 -GL_TEXTURE31 = 34015 -GL_ACTIVE_TEXTURE = 34016 -GL_CLIENT_ACTIVE_TEXTURE = 34017 -GL_MAX_TEXTURE_UNITS = 34018 -GL_TRANSPOSE_MODELVIEW_MATRIX = 34019 -GL_TRANSPOSE_PROJECTION_MATRIX = 34020 -GL_TRANSPOSE_TEXTURE_MATRIX = 34021 -GL_TRANSPOSE_COLOR_MATRIX = 34022 -GL_SUBTRACT = 34023 -GL_MAX_RENDERBUFFER_SIZE = 34024 -GL_MAX_RENDERBUFFER_SIZE_EXT = 34024 -GL_COMPRESSED_ALPHA = 34025 -GL_COMPRESSED_LUMINANCE = 34026 -GL_COMPRESSED_LUMINANCE_ALPHA = 34027 -GL_COMPRESSED_INTENSITY = 34028 -GL_COMPRESSED_RGB = 34029 -GL_COMPRESSED_RGBA = 34030 -GL_TEXTURE_COMPRESSION_HINT = 34031 -GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER = 34032 -GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER = 34033 -GL_TEXTURE_RECTANGLE = 34037 -GL_TEXTURE_BINDING_RECTANGLE = 34038 -GL_PROXY_TEXTURE_RECTANGLE = 34039 -GL_MAX_RECTANGLE_TEXTURE_SIZE = 34040 -GL_DEPTH_STENCIL = 34041 -GL_UNSIGNED_INT_24_8 = 34042 -GL_MAX_TEXTURE_LOD_BIAS = 34045 -GL_TEXTURE_MAX_ANISOTROPY = 
34046 -GL_MAX_TEXTURE_MAX_ANISOTROPY = 34047 -GL_TEXTURE_FILTER_CONTROL = 34048 -GL_TEXTURE_LOD_BIAS = 34049 -GL_INCR_WRAP = 34055 -GL_DECR_WRAP = 34056 -GL_NORMAL_MAP = 34065 -GL_REFLECTION_MAP = 34066 -GL_TEXTURE_CUBE_MAP = 34067 -GL_TEXTURE_BINDING_CUBE_MAP = 34068 -GL_TEXTURE_CUBE_MAP_POSITIVE_X = 34069 -GL_TEXTURE_CUBE_MAP_NEGATIVE_X = 34070 -GL_TEXTURE_CUBE_MAP_POSITIVE_Y = 34071 -GL_TEXTURE_CUBE_MAP_NEGATIVE_Y = 34072 -GL_TEXTURE_CUBE_MAP_POSITIVE_Z = 34073 -GL_TEXTURE_CUBE_MAP_NEGATIVE_Z = 34074 -GL_PROXY_TEXTURE_CUBE_MAP = 34075 -GL_MAX_CUBE_MAP_TEXTURE_SIZE = 34076 -GL_COMBINE = 34160 -GL_COMBINE_RGB = 34161 -GL_COMBINE_ALPHA = 34162 -GL_RGB_SCALE = 34163 -GL_ADD_SIGNED = 34164 -GL_INTERPOLATE = 34165 -GL_CONSTANT = 34166 -GL_PRIMARY_COLOR = 34167 -GL_PREVIOUS = 34168 -GL_SOURCE0_RGB = 34176 -GL_SRC0_RGB = 34176 -GL_SOURCE1_RGB = 34177 -GL_SRC1_RGB = 34177 -GL_SOURCE2_RGB = 34178 -GL_SRC2_RGB = 34178 -GL_SOURCE0_ALPHA = 34184 -GL_SRC0_ALPHA = 34184 -GL_SOURCE1_ALPHA = 34185 -GL_SRC1_ALPHA = 34185 -GL_SOURCE2_ALPHA = 34186 -GL_SRC2_ALPHA = 34186 -GL_OPERAND0_RGB = 34192 -GL_OPERAND1_RGB = 34193 -GL_OPERAND2_RGB = 34194 -GL_OPERAND0_ALPHA = 34200 -GL_OPERAND1_ALPHA = 34201 -GL_OPERAND2_ALPHA = 34202 -GL_VERTEX_ARRAY_BINDING = 34229 -GL_VERTEX_ATTRIB_ARRAY_ENABLED = 34338 -GL_VERTEX_ATTRIB_ARRAY_SIZE = 34339 -GL_VERTEX_ATTRIB_ARRAY_STRIDE = 34340 -GL_VERTEX_ATTRIB_ARRAY_TYPE = 34341 -GL_CURRENT_VERTEX_ATTRIB = 34342 -GL_VERTEX_PROGRAM_POINT_SIZE = 34370 -GL_PROGRAM_POINT_SIZE = 34370 -GL_VERTEX_PROGRAM_TWO_SIDE = 34371 -GL_VERTEX_ATTRIB_ARRAY_POINTER = 34373 -GL_DEPTH_CLAMP = 34383 -GL_TEXTURE_COMPRESSED_IMAGE_SIZE = 34464 -GL_TEXTURE_COMPRESSED = 34465 -GL_NUM_COMPRESSED_TEXTURE_FORMATS = 34466 -GL_COMPRESSED_TEXTURE_FORMATS = 34467 -GL_DOT3_RGB = 34478 -GL_DOT3_RGBA = 34479 -GL_PROGRAM_BINARY_LENGTH = 34625 -GL_MIRROR_CLAMP_TO_EDGE = 34627 -GL_VERTEX_ATTRIB_ARRAY_LONG = 34638 -GL_BUFFER_SIZE = 34660 -GL_BUFFER_USAGE = 34661 -GL_NUM_PROGRAM_BINARY_FORMATS = 34814 -GL_PROGRAM_BINARY_FORMATS = 34815 -GL_STENCIL_BACK_FUNC = 34816 -GL_STENCIL_BACK_FAIL = 34817 -GL_STENCIL_BACK_PASS_DEPTH_FAIL = 34818 -GL_STENCIL_BACK_PASS_DEPTH_PASS = 34819 -GL_RGBA32F = 34836 -GL_RGB32F = 34837 -GL_RGBA16F = 34842 -GL_RGB16F = 34843 -GL_MAX_DRAW_BUFFERS = 34852 -GL_DRAW_BUFFER0 = 34853 -GL_DRAW_BUFFER1 = 34854 -GL_DRAW_BUFFER2 = 34855 -GL_DRAW_BUFFER3 = 34856 -GL_DRAW_BUFFER4 = 34857 -GL_DRAW_BUFFER5 = 34858 -GL_DRAW_BUFFER6 = 34859 -GL_DRAW_BUFFER7 = 34860 -GL_DRAW_BUFFER8 = 34861 -GL_DRAW_BUFFER9 = 34862 -GL_DRAW_BUFFER10 = 34863 -GL_DRAW_BUFFER11 = 34864 -GL_DRAW_BUFFER12 = 34865 -GL_DRAW_BUFFER13 = 34866 -GL_DRAW_BUFFER14 = 34867 -GL_DRAW_BUFFER15 = 34868 -GL_BLEND_EQUATION_ALPHA = 34877 -GL_TEXTURE_DEPTH_SIZE = 34890 -GL_DEPTH_TEXTURE_MODE = 34891 -GL_TEXTURE_COMPARE_MODE = 34892 -GL_TEXTURE_COMPARE_FUNC = 34893 -GL_COMPARE_R_TO_TEXTURE = 34894 -GL_COMPARE_REF_TO_TEXTURE = 34894 -GL_TEXTURE_CUBE_MAP_SEAMLESS = 34895 -GL_POINT_SPRITE = 34913 -GL_COORD_REPLACE = 34914 -GL_QUERY_COUNTER_BITS = 34916 -GL_CURRENT_QUERY = 34917 -GL_QUERY_RESULT = 34918 -GL_QUERY_RESULT_AVAILABLE = 34919 -GL_MAX_VERTEX_ATTRIBS = 34921 -GL_VERTEX_ATTRIB_ARRAY_NORMALIZED = 34922 -GL_MAX_TESS_CONTROL_INPUT_COMPONENTS = 34924 -GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS = 34925 -GL_MAX_TEXTURE_COORDS = 34929 -GL_MAX_TEXTURE_IMAGE_UNITS = 34930 -GL_GEOMETRY_SHADER_INVOCATIONS = 34943 -GL_ARRAY_BUFFER = 34962 -GL_ELEMENT_ARRAY_BUFFER = 34963 -GL_ARRAY_BUFFER_BINDING = 34964 -GL_ELEMENT_ARRAY_BUFFER_BINDING = 34965 
-GL_VERTEX_ARRAY_BUFFER_BINDING = 34966 -GL_NORMAL_ARRAY_BUFFER_BINDING = 34967 -GL_COLOR_ARRAY_BUFFER_BINDING = 34968 -GL_INDEX_ARRAY_BUFFER_BINDING = 34969 -GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING = 34970 -GL_EDGE_FLAG_ARRAY_BUFFER_BINDING = 34971 -GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING = 34972 -GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING = 34973 -GL_FOG_COORD_ARRAY_BUFFER_BINDING = 34973 -GL_WEIGHT_ARRAY_BUFFER_BINDING = 34974 -GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING = 34975 -GL_READ_ONLY = 35000 -GL_WRITE_ONLY = 35001 -GL_READ_WRITE = 35002 -GL_BUFFER_ACCESS = 35003 -GL_BUFFER_MAPPED = 35004 -GL_BUFFER_MAP_POINTER = 35005 -GL_TIME_ELAPSED = 35007 -GL_STREAM_DRAW = 35040 -GL_STREAM_READ = 35041 -GL_STREAM_COPY = 35042 -GL_STATIC_DRAW = 35044 -GL_STATIC_READ = 35045 -GL_STATIC_COPY = 35046 -GL_DYNAMIC_DRAW = 35048 -GL_DYNAMIC_READ = 35049 -GL_DYNAMIC_COPY = 35050 -GL_PIXEL_PACK_BUFFER = 35051 -GL_PIXEL_UNPACK_BUFFER = 35052 -GL_PIXEL_PACK_BUFFER_BINDING = 35053 -GL_PIXEL_UNPACK_BUFFER_BINDING = 35055 -GL_DEPTH24_STENCIL8 = 35056 -GL_TEXTURE_STENCIL_SIZE = 35057 -GL_SRC1_COLOR = 35065 -GL_ONE_MINUS_SRC1_COLOR = 35066 -GL_ONE_MINUS_SRC1_ALPHA = 35067 -GL_MAX_DUAL_SOURCE_DRAW_BUFFERS = 35068 -GL_VERTEX_ATTRIB_ARRAY_INTEGER = 35069 -GL_VERTEX_ATTRIB_ARRAY_DIVISOR = 35070 -GL_MAX_ARRAY_TEXTURE_LAYERS = 35071 -GL_MIN_PROGRAM_TEXEL_OFFSET = 35076 -GL_MAX_PROGRAM_TEXEL_OFFSET = 35077 -GL_SAMPLES_PASSED = 35092 -GL_GEOMETRY_VERTICES_OUT = 35094 -GL_GEOMETRY_INPUT_TYPE = 35095 -GL_GEOMETRY_OUTPUT_TYPE = 35096 -GL_SAMPLER_BINDING = 35097 -GL_CLAMP_VERTEX_COLOR = 35098 -GL_CLAMP_FRAGMENT_COLOR = 35099 -GL_CLAMP_READ_COLOR = 35100 -GL_FIXED_ONLY = 35101 -GL_UNIFORM_BUFFER = 35345 -GL_UNIFORM_BUFFER_BINDING = 35368 -GL_UNIFORM_BUFFER_START = 35369 -GL_UNIFORM_BUFFER_SIZE = 35370 -GL_MAX_VERTEX_UNIFORM_BLOCKS = 35371 -GL_MAX_GEOMETRY_UNIFORM_BLOCKS = 35372 -GL_MAX_FRAGMENT_UNIFORM_BLOCKS = 35373 -GL_MAX_COMBINED_UNIFORM_BLOCKS = 35374 -GL_MAX_UNIFORM_BUFFER_BINDINGS = 35375 -GL_MAX_UNIFORM_BLOCK_SIZE = 35376 -GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS = 35377 -GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS = 35378 -GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS = 35379 -GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT = 35380 -GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH = 35381 -GL_ACTIVE_UNIFORM_BLOCKS = 35382 -GL_UNIFORM_TYPE = 35383 -GL_UNIFORM_SIZE = 35384 -GL_UNIFORM_NAME_LENGTH = 35385 -GL_UNIFORM_BLOCK_INDEX = 35386 -GL_UNIFORM_OFFSET = 35387 -GL_UNIFORM_ARRAY_STRIDE = 35388 -GL_UNIFORM_MATRIX_STRIDE = 35389 -GL_UNIFORM_IS_ROW_MAJOR = 35390 -GL_UNIFORM_BLOCK_BINDING = 35391 -GL_UNIFORM_BLOCK_DATA_SIZE = 35392 -GL_UNIFORM_BLOCK_NAME_LENGTH = 35393 -GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS = 35394 -GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES = 35395 -GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER = 35396 -GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER = 35397 -GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER = 35398 -GL_FRAGMENT_SHADER = 35632 -GL_VERTEX_SHADER = 35633 -GL_MAX_FRAGMENT_UNIFORM_COMPONENTS = 35657 -GL_MAX_VERTEX_UNIFORM_COMPONENTS = 35658 -GL_MAX_VARYING_FLOATS = 35659 -GL_MAX_VARYING_COMPONENTS = 35659 -GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS = 35660 -GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS = 35661 -GL_SHADER_TYPE = 35663 -GL_FLOAT_VEC2 = 35664 -GL_FLOAT_VEC3 = 35665 -GL_FLOAT_VEC4 = 35666 -GL_INT_VEC2 = 35667 -GL_INT_VEC3 = 35668 -GL_INT_VEC4 = 35669 -GL_BOOL = 35670 -GL_BOOL_VEC2 = 35671 -GL_BOOL_VEC3 = 35672 -GL_BOOL_VEC4 = 35673 -GL_FLOAT_MAT2 = 35674 -GL_FLOAT_MAT3 = 35675 -GL_FLOAT_MAT4 = 35676 -GL_SAMPLER_1D = 35677 -GL_SAMPLER_2D = 
35678 -GL_SAMPLER_3D = 35679 -GL_SAMPLER_CUBE = 35680 -GL_SAMPLER_1D_SHADOW = 35681 -GL_SAMPLER_2D_SHADOW = 35682 -GL_SAMPLER_2D_RECT = 35683 -GL_SAMPLER_2D_RECT_SHADOW = 35684 -GL_FLOAT_MAT2x3 = 35685 -GL_FLOAT_MAT2x4 = 35686 -GL_FLOAT_MAT3x2 = 35687 -GL_FLOAT_MAT3x4 = 35688 -GL_FLOAT_MAT4x2 = 35689 -GL_FLOAT_MAT4x3 = 35690 -GL_DELETE_STATUS = 35712 -GL_COMPILE_STATUS = 35713 -GL_LINK_STATUS = 35714 -GL_VALIDATE_STATUS = 35715 -GL_INFO_LOG_LENGTH = 35716 -GL_ATTACHED_SHADERS = 35717 -GL_ACTIVE_UNIFORMS = 35718 -GL_ACTIVE_UNIFORM_MAX_LENGTH = 35719 -GL_SHADER_SOURCE_LENGTH = 35720 -GL_ACTIVE_ATTRIBUTES = 35721 -GL_ACTIVE_ATTRIBUTE_MAX_LENGTH = 35722 -GL_FRAGMENT_SHADER_DERIVATIVE_HINT = 35723 -GL_SHADING_LANGUAGE_VERSION = 35724 -GL_CURRENT_PROGRAM = 35725 -GL_IMPLEMENTATION_COLOR_READ_TYPE = 35738 -GL_IMPLEMENTATION_COLOR_READ_FORMAT = 35739 -GL_TEXTURE_RED_TYPE = 35856 -GL_TEXTURE_GREEN_TYPE = 35857 -GL_TEXTURE_BLUE_TYPE = 35858 -GL_TEXTURE_ALPHA_TYPE = 35859 -GL_TEXTURE_LUMINANCE_TYPE = 35860 -GL_TEXTURE_INTENSITY_TYPE = 35861 -GL_TEXTURE_DEPTH_TYPE = 35862 -GL_UNSIGNED_NORMALIZED = 35863 -GL_TEXTURE_1D_ARRAY = 35864 -GL_PROXY_TEXTURE_1D_ARRAY = 35865 -GL_TEXTURE_2D_ARRAY = 35866 -GL_PROXY_TEXTURE_2D_ARRAY = 35867 -GL_TEXTURE_BINDING_1D_ARRAY = 35868 -GL_TEXTURE_BINDING_2D_ARRAY = 35869 -GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS = 35881 -GL_TEXTURE_BUFFER = 35882 -GL_TEXTURE_BUFFER_BINDING = 35882 -GL_MAX_TEXTURE_BUFFER_SIZE = 35883 -GL_TEXTURE_BINDING_BUFFER = 35884 -GL_TEXTURE_BUFFER_DATA_STORE_BINDING = 35885 -GL_ANY_SAMPLES_PASSED = 35887 -GL_SAMPLE_SHADING = 35894 -GL_MIN_SAMPLE_SHADING_VALUE = 35895 -GL_R11F_G11F_B10F = 35898 -GL_UNSIGNED_INT_10F_11F_11F_REV = 35899 -GL_RGB9_E5 = 35901 -GL_UNSIGNED_INT_5_9_9_9_REV = 35902 -GL_TEXTURE_SHARED_SIZE = 35903 -GL_SRGB = 35904 -GL_SRGB8 = 35905 -GL_SRGB_ALPHA = 35906 -GL_SRGB8_ALPHA8 = 35907 -GL_SLUMINANCE_ALPHA = 35908 -GL_SLUMINANCE8_ALPHA8 = 35909 -GL_SLUMINANCE = 35910 -GL_SLUMINANCE8 = 35911 -GL_COMPRESSED_SRGB = 35912 -GL_COMPRESSED_SRGB_ALPHA = 35913 -GL_COMPRESSED_SLUMINANCE = 35914 -GL_COMPRESSED_SLUMINANCE_ALPHA = 35915 -GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH = 35958 -GL_TRANSFORM_FEEDBACK_BUFFER_MODE = 35967 -GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS = 35968 -GL_TRANSFORM_FEEDBACK_VARYINGS = 35971 -GL_TRANSFORM_FEEDBACK_BUFFER_START = 35972 -GL_TRANSFORM_FEEDBACK_BUFFER_SIZE = 35973 -GL_PRIMITIVES_GENERATED = 35975 -GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN = 35976 -GL_RASTERIZER_DISCARD = 35977 -GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS = 35978 -GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS = 35979 -GL_INTERLEAVED_ATTRIBS = 35980 -GL_SEPARATE_ATTRIBS = 35981 -GL_TRANSFORM_FEEDBACK_BUFFER = 35982 -GL_TRANSFORM_FEEDBACK_BUFFER_BINDING = 35983 -GL_POINT_SPRITE_COORD_ORIGIN = 36000 -GL_LOWER_LEFT = 36001 -GL_UPPER_LEFT = 36002 -GL_STENCIL_BACK_REF = 36003 -GL_STENCIL_BACK_VALUE_MASK = 36004 -GL_STENCIL_BACK_WRITEMASK = 36005 -GL_FRAMEBUFFER_BINDING = 36006 -GL_DRAW_FRAMEBUFFER_BINDING = 36006 -GL_FRAMEBUFFER_BINDING_EXT = 36006 -GL_RENDERBUFFER_BINDING = 36007 -GL_RENDERBUFFER_BINDING_EXT = 36007 -GL_READ_FRAMEBUFFER = 36008 -GL_DRAW_FRAMEBUFFER = 36009 -GL_READ_FRAMEBUFFER_BINDING = 36010 -GL_RENDERBUFFER_SAMPLES = 36011 -GL_DEPTH_COMPONENT32F = 36012 -GL_DEPTH32F_STENCIL8 = 36013 -GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE = 36048 -GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT = 36048 -GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME = 36049 -GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT = 36049 -GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL = 36050 
-GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT = 36050 -GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE = 36051 -GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT = 36051 -GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER = 36052 -GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT = 36052 -GL_FRAMEBUFFER_COMPLETE = 36053 -GL_FRAMEBUFFER_COMPLETE_EXT = 36053 -GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT = 36054 -GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT = 36054 -GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT = 36055 -GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT = 36055 -GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT = 36057 -GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT = 36058 -GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER = 36059 -GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT = 36059 -GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER = 36060 -GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT = 36060 -GL_FRAMEBUFFER_UNSUPPORTED = 36061 -GL_FRAMEBUFFER_UNSUPPORTED_EXT = 36061 -GL_MAX_COLOR_ATTACHMENTS = 36063 -GL_MAX_COLOR_ATTACHMENTS_EXT = 36063 -GL_COLOR_ATTACHMENT0 = 36064 -GL_COLOR_ATTACHMENT0_EXT = 36064 -GL_COLOR_ATTACHMENT1 = 36065 -GL_COLOR_ATTACHMENT1_EXT = 36065 -GL_COLOR_ATTACHMENT2 = 36066 -GL_COLOR_ATTACHMENT2_EXT = 36066 -GL_COLOR_ATTACHMENT3 = 36067 -GL_COLOR_ATTACHMENT3_EXT = 36067 -GL_COLOR_ATTACHMENT4 = 36068 -GL_COLOR_ATTACHMENT4_EXT = 36068 -GL_COLOR_ATTACHMENT5 = 36069 -GL_COLOR_ATTACHMENT5_EXT = 36069 -GL_COLOR_ATTACHMENT6 = 36070 -GL_COLOR_ATTACHMENT6_EXT = 36070 -GL_COLOR_ATTACHMENT7 = 36071 -GL_COLOR_ATTACHMENT7_EXT = 36071 -GL_COLOR_ATTACHMENT8 = 36072 -GL_COLOR_ATTACHMENT8_EXT = 36072 -GL_COLOR_ATTACHMENT9 = 36073 -GL_COLOR_ATTACHMENT9_EXT = 36073 -GL_COLOR_ATTACHMENT10 = 36074 -GL_COLOR_ATTACHMENT10_EXT = 36074 -GL_COLOR_ATTACHMENT11 = 36075 -GL_COLOR_ATTACHMENT11_EXT = 36075 -GL_COLOR_ATTACHMENT12 = 36076 -GL_COLOR_ATTACHMENT12_EXT = 36076 -GL_COLOR_ATTACHMENT13 = 36077 -GL_COLOR_ATTACHMENT13_EXT = 36077 -GL_COLOR_ATTACHMENT14 = 36078 -GL_COLOR_ATTACHMENT14_EXT = 36078 -GL_COLOR_ATTACHMENT15 = 36079 -GL_COLOR_ATTACHMENT15_EXT = 36079 -GL_COLOR_ATTACHMENT16 = 36080 -GL_COLOR_ATTACHMENT17 = 36081 -GL_COLOR_ATTACHMENT18 = 36082 -GL_COLOR_ATTACHMENT19 = 36083 -GL_COLOR_ATTACHMENT20 = 36084 -GL_COLOR_ATTACHMENT21 = 36085 -GL_COLOR_ATTACHMENT22 = 36086 -GL_COLOR_ATTACHMENT23 = 36087 -GL_COLOR_ATTACHMENT24 = 36088 -GL_COLOR_ATTACHMENT25 = 36089 -GL_COLOR_ATTACHMENT26 = 36090 -GL_COLOR_ATTACHMENT27 = 36091 -GL_COLOR_ATTACHMENT28 = 36092 -GL_COLOR_ATTACHMENT29 = 36093 -GL_COLOR_ATTACHMENT30 = 36094 -GL_COLOR_ATTACHMENT31 = 36095 -GL_DEPTH_ATTACHMENT = 36096 -GL_DEPTH_ATTACHMENT_EXT = 36096 -GL_STENCIL_ATTACHMENT = 36128 -GL_STENCIL_ATTACHMENT_EXT = 36128 -GL_FRAMEBUFFER = 36160 -GL_FRAMEBUFFER_EXT = 36160 -GL_RENDERBUFFER = 36161 -GL_RENDERBUFFER_EXT = 36161 -GL_RENDERBUFFER_WIDTH = 36162 -GL_RENDERBUFFER_WIDTH_EXT = 36162 -GL_RENDERBUFFER_HEIGHT = 36163 -GL_RENDERBUFFER_HEIGHT_EXT = 36163 -GL_RENDERBUFFER_INTERNAL_FORMAT = 36164 -GL_RENDERBUFFER_INTERNAL_FORMAT_EXT = 36164 -GL_STENCIL_INDEX1 = 36166 -GL_STENCIL_INDEX1_EXT = 36166 -GL_STENCIL_INDEX4 = 36167 -GL_STENCIL_INDEX4_EXT = 36167 -GL_STENCIL_INDEX8 = 36168 -GL_STENCIL_INDEX8_EXT = 36168 -GL_STENCIL_INDEX16 = 36169 -GL_STENCIL_INDEX16_EXT = 36169 -GL_RENDERBUFFER_RED_SIZE = 36176 -GL_RENDERBUFFER_RED_SIZE_EXT = 36176 -GL_RENDERBUFFER_GREEN_SIZE = 36177 -GL_RENDERBUFFER_GREEN_SIZE_EXT = 36177 -GL_RENDERBUFFER_BLUE_SIZE = 36178 -GL_RENDERBUFFER_BLUE_SIZE_EXT = 36178 -GL_RENDERBUFFER_ALPHA_SIZE = 36179 -GL_RENDERBUFFER_ALPHA_SIZE_EXT = 36179 -GL_RENDERBUFFER_DEPTH_SIZE = 36180 
-GL_RENDERBUFFER_DEPTH_SIZE_EXT = 36180 -GL_RENDERBUFFER_STENCIL_SIZE = 36181 -GL_RENDERBUFFER_STENCIL_SIZE_EXT = 36181 -GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE = 36182 -GL_MAX_SAMPLES = 36183 -GL_RGB565 = 36194 -GL_PRIMITIVE_RESTART_FIXED_INDEX = 36201 -GL_ANY_SAMPLES_PASSED_CONSERVATIVE = 36202 -GL_MAX_ELEMENT_INDEX = 36203 -GL_RGBA32UI = 36208 -GL_RGB32UI = 36209 -GL_RGBA16UI = 36214 -GL_RGB16UI = 36215 -GL_RGBA8UI = 36220 -GL_RGB8UI = 36221 -GL_RGBA32I = 36226 -GL_RGB32I = 36227 -GL_RGBA16I = 36232 -GL_RGB16I = 36233 -GL_RGBA8I = 36238 -GL_RGB8I = 36239 -GL_RED_INTEGER = 36244 -GL_GREEN_INTEGER = 36245 -GL_BLUE_INTEGER = 36246 -GL_ALPHA_INTEGER = 36247 -GL_RGB_INTEGER = 36248 -GL_RGBA_INTEGER = 36249 -GL_BGR_INTEGER = 36250 -GL_BGRA_INTEGER = 36251 -GL_INT_2_10_10_10_REV = 36255 -GL_FRAMEBUFFER_ATTACHMENT_LAYERED = 36263 -GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS = 36264 -GL_FLOAT_32_UNSIGNED_INT_24_8_REV = 36269 -GL_FRAMEBUFFER_SRGB = 36281 -GL_COMPRESSED_RED_RGTC1 = 36283 -GL_COMPRESSED_SIGNED_RED_RGTC1 = 36284 -GL_COMPRESSED_RG_RGTC2 = 36285 -GL_COMPRESSED_SIGNED_RG_RGTC2 = 36286 -GL_SAMPLER_1D_ARRAY = 36288 -GL_SAMPLER_2D_ARRAY = 36289 -GL_SAMPLER_BUFFER = 36290 -GL_SAMPLER_1D_ARRAY_SHADOW = 36291 -GL_SAMPLER_2D_ARRAY_SHADOW = 36292 -GL_SAMPLER_CUBE_SHADOW = 36293 -GL_UNSIGNED_INT_VEC2 = 36294 -GL_UNSIGNED_INT_VEC3 = 36295 -GL_UNSIGNED_INT_VEC4 = 36296 -GL_INT_SAMPLER_1D = 36297 -GL_INT_SAMPLER_2D = 36298 -GL_INT_SAMPLER_3D = 36299 -GL_INT_SAMPLER_CUBE = 36300 -GL_INT_SAMPLER_2D_RECT = 36301 -GL_INT_SAMPLER_1D_ARRAY = 36302 -GL_INT_SAMPLER_2D_ARRAY = 36303 -GL_INT_SAMPLER_BUFFER = 36304 -GL_UNSIGNED_INT_SAMPLER_1D = 36305 -GL_UNSIGNED_INT_SAMPLER_2D = 36306 -GL_UNSIGNED_INT_SAMPLER_3D = 36307 -GL_UNSIGNED_INT_SAMPLER_CUBE = 36308 -GL_UNSIGNED_INT_SAMPLER_2D_RECT = 36309 -GL_UNSIGNED_INT_SAMPLER_1D_ARRAY = 36310 -GL_UNSIGNED_INT_SAMPLER_2D_ARRAY = 36311 -GL_UNSIGNED_INT_SAMPLER_BUFFER = 36312 -GL_GEOMETRY_SHADER = 36313 -GL_MAX_GEOMETRY_UNIFORM_COMPONENTS = 36319 -GL_MAX_GEOMETRY_OUTPUT_VERTICES = 36320 -GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS = 36321 -GL_ACTIVE_SUBROUTINES = 36325 -GL_ACTIVE_SUBROUTINE_UNIFORMS = 36326 -GL_MAX_SUBROUTINES = 36327 -GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS = 36328 -GL_LOW_FLOAT = 36336 -GL_MEDIUM_FLOAT = 36337 -GL_HIGH_FLOAT = 36338 -GL_LOW_INT = 36339 -GL_MEDIUM_INT = 36340 -GL_HIGH_INT = 36341 -GL_SHADER_BINARY_FORMATS = 36344 -GL_NUM_SHADER_BINARY_FORMATS = 36345 -GL_SHADER_COMPILER = 36346 -GL_MAX_VERTEX_UNIFORM_VECTORS = 36347 -GL_MAX_VARYING_VECTORS = 36348 -GL_MAX_FRAGMENT_UNIFORM_VECTORS = 36349 -GL_QUERY_WAIT = 36371 -GL_QUERY_NO_WAIT = 36372 -GL_QUERY_BY_REGION_WAIT = 36373 -GL_QUERY_BY_REGION_NO_WAIT = 36374 -GL_QUERY_WAIT_INVERTED = 36375 -GL_QUERY_NO_WAIT_INVERTED = 36376 -GL_QUERY_BY_REGION_WAIT_INVERTED = 36377 -GL_QUERY_BY_REGION_NO_WAIT_INVERTED = 36378 -GL_POLYGON_OFFSET_CLAMP = 36379 -GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS = 36382 -GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS = 36383 -GL_TRANSFORM_FEEDBACK = 36386 -GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED = 36387 -GL_TRANSFORM_FEEDBACK_PAUSED = 36387 -GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE = 36388 -GL_TRANSFORM_FEEDBACK_ACTIVE = 36388 -GL_TRANSFORM_FEEDBACK_BINDING = 36389 -GL_TIMESTAMP = 36392 -GL_TEXTURE_SWIZZLE_R = 36418 -GL_TEXTURE_SWIZZLE_G = 36419 -GL_TEXTURE_SWIZZLE_B = 36420 -GL_TEXTURE_SWIZZLE_A = 36421 -GL_TEXTURE_SWIZZLE_RGBA = 36422 -GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS = 36423 -GL_ACTIVE_SUBROUTINE_MAX_LENGTH = 36424 -GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH = 36425 
-GL_NUM_COMPATIBLE_SUBROUTINES = 36426 -GL_COMPATIBLE_SUBROUTINES = 36427 -GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION = 36428 -GL_FIRST_VERTEX_CONVENTION = 36429 -GL_LAST_VERTEX_CONVENTION = 36430 -GL_PROVOKING_VERTEX = 36431 -GL_SAMPLE_POSITION = 36432 -GL_SAMPLE_MASK = 36433 -GL_SAMPLE_MASK_VALUE = 36434 -GL_MAX_SAMPLE_MASK_WORDS = 36441 -GL_MAX_GEOMETRY_SHADER_INVOCATIONS = 36442 -GL_MIN_FRAGMENT_INTERPOLATION_OFFSET = 36443 -GL_MAX_FRAGMENT_INTERPOLATION_OFFSET = 36444 -GL_FRAGMENT_INTERPOLATION_OFFSET_BITS = 36445 -GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET = 36446 -GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET = 36447 -GL_MAX_TRANSFORM_FEEDBACK_BUFFERS = 36464 -GL_MAX_VERTEX_STREAMS = 36465 -GL_PATCH_VERTICES = 36466 -GL_PATCH_DEFAULT_INNER_LEVEL = 36467 -GL_PATCH_DEFAULT_OUTER_LEVEL = 36468 -GL_TESS_CONTROL_OUTPUT_VERTICES = 36469 -GL_TESS_GEN_MODE = 36470 -GL_TESS_GEN_SPACING = 36471 -GL_TESS_GEN_VERTEX_ORDER = 36472 -GL_TESS_GEN_POINT_MODE = 36473 -GL_ISOLINES = 36474 -GL_FRACTIONAL_ODD = 36475 -GL_FRACTIONAL_EVEN = 36476 -GL_MAX_PATCH_VERTICES = 36477 -GL_MAX_TESS_GEN_LEVEL = 36478 -GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS = 36479 -GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS = 36480 -GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS = 36481 -GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS = 36482 -GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS = 36483 -GL_MAX_TESS_PATCH_COMPONENTS = 36484 -GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS = 36485 -GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS = 36486 -GL_TESS_EVALUATION_SHADER = 36487 -GL_TESS_CONTROL_SHADER = 36488 -GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS = 36489 -GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS = 36490 -GL_COMPRESSED_RGBA_BPTC_UNORM = 36492 -GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM = 36493 -GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT = 36494 -GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT = 36495 -GL_COPY_READ_BUFFER = 36662 -GL_COPY_READ_BUFFER_BINDING = 36662 -GL_COPY_WRITE_BUFFER = 36663 -GL_COPY_WRITE_BUFFER_BINDING = 36663 -GL_MAX_IMAGE_UNITS = 36664 -GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS = 36665 -GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES = 36665 -GL_IMAGE_BINDING_NAME = 36666 -GL_IMAGE_BINDING_LEVEL = 36667 -GL_IMAGE_BINDING_LAYERED = 36668 -GL_IMAGE_BINDING_LAYER = 36669 -GL_IMAGE_BINDING_ACCESS = 36670 -GL_DRAW_INDIRECT_BUFFER = 36671 -GL_DRAW_INDIRECT_BUFFER_BINDING = 36675 -GL_DOUBLE_MAT2 = 36678 -GL_DOUBLE_MAT3 = 36679 -GL_DOUBLE_MAT4 = 36680 -GL_DOUBLE_MAT2x3 = 36681 -GL_DOUBLE_MAT2x4 = 36682 -GL_DOUBLE_MAT3x2 = 36683 -GL_DOUBLE_MAT3x4 = 36684 -GL_DOUBLE_MAT4x2 = 36685 -GL_DOUBLE_MAT4x3 = 36686 -GL_VERTEX_BINDING_BUFFER = 36687 -GL_R8_SNORM = 36756 -GL_RG8_SNORM = 36757 -GL_RGB8_SNORM = 36758 -GL_RGBA8_SNORM = 36759 -GL_R16_SNORM = 36760 -GL_RG16_SNORM = 36761 -GL_RGB16_SNORM = 36762 -GL_RGBA16_SNORM = 36763 -GL_SIGNED_NORMALIZED = 36764 -GL_PRIMITIVE_RESTART = 36765 -GL_PRIMITIVE_RESTART_INDEX = 36766 -GL_DOUBLE_VEC2 = 36860 -GL_DOUBLE_VEC3 = 36861 -GL_DOUBLE_VEC4 = 36862 -GL_TEXTURE_CUBE_MAP_ARRAY = 36873 -GL_TEXTURE_BINDING_CUBE_MAP_ARRAY = 36874 -GL_PROXY_TEXTURE_CUBE_MAP_ARRAY = 36875 -GL_SAMPLER_CUBE_MAP_ARRAY = 36876 -GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW = 36877 -GL_INT_SAMPLER_CUBE_MAP_ARRAY = 36878 -GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY = 36879 -GL_IMAGE_1D = 36940 -GL_IMAGE_2D = 36941 -GL_IMAGE_3D = 36942 -GL_IMAGE_2D_RECT = 36943 -GL_IMAGE_CUBE = 36944 -GL_IMAGE_BUFFER = 36945 -GL_IMAGE_1D_ARRAY = 36946 -GL_IMAGE_2D_ARRAY = 36947 -GL_IMAGE_CUBE_MAP_ARRAY = 36948 -GL_IMAGE_2D_MULTISAMPLE = 36949 -GL_IMAGE_2D_MULTISAMPLE_ARRAY = 36950 -GL_INT_IMAGE_1D = 36951 
-GL_INT_IMAGE_2D = 36952 -GL_INT_IMAGE_3D = 36953 -GL_INT_IMAGE_2D_RECT = 36954 -GL_INT_IMAGE_CUBE = 36955 -GL_INT_IMAGE_BUFFER = 36956 -GL_INT_IMAGE_1D_ARRAY = 36957 -GL_INT_IMAGE_2D_ARRAY = 36958 -GL_INT_IMAGE_CUBE_MAP_ARRAY = 36959 -GL_INT_IMAGE_2D_MULTISAMPLE = 36960 -GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY = 36961 -GL_UNSIGNED_INT_IMAGE_1D = 36962 -GL_UNSIGNED_INT_IMAGE_2D = 36963 -GL_UNSIGNED_INT_IMAGE_3D = 36964 -GL_UNSIGNED_INT_IMAGE_2D_RECT = 36965 -GL_UNSIGNED_INT_IMAGE_CUBE = 36966 -GL_UNSIGNED_INT_IMAGE_BUFFER = 36967 -GL_UNSIGNED_INT_IMAGE_1D_ARRAY = 36968 -GL_UNSIGNED_INT_IMAGE_2D_ARRAY = 36969 -GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY = 36970 -GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE = 36971 -GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY = 36972 -GL_MAX_IMAGE_SAMPLES = 36973 -GL_IMAGE_BINDING_FORMAT = 36974 -GL_RGB10_A2UI = 36975 -GL_MIN_MAP_BUFFER_ALIGNMENT = 37052 -GL_IMAGE_FORMAT_COMPATIBILITY_TYPE = 37063 -GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE = 37064 -GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS = 37065 -GL_MAX_VERTEX_IMAGE_UNIFORMS = 37066 -GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS = 37067 -GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS = 37068 -GL_MAX_GEOMETRY_IMAGE_UNIFORMS = 37069 -GL_MAX_FRAGMENT_IMAGE_UNIFORMS = 37070 -GL_MAX_COMBINED_IMAGE_UNIFORMS = 37071 -GL_SHADER_STORAGE_BUFFER = 37074 -GL_SHADER_STORAGE_BUFFER_BINDING = 37075 -GL_SHADER_STORAGE_BUFFER_START = 37076 -GL_SHADER_STORAGE_BUFFER_SIZE = 37077 -GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS = 37078 -GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS = 37079 -GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS = 37080 -GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS = 37081 -GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS = 37082 -GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS = 37083 -GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS = 37084 -GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS = 37085 -GL_MAX_SHADER_STORAGE_BLOCK_SIZE = 37086 -GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT = 37087 -GL_DEPTH_STENCIL_TEXTURE_MODE = 37098 -GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS = 37099 -GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER = 37100 -GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER = 37101 -GL_DISPATCH_INDIRECT_BUFFER = 37102 -GL_DISPATCH_INDIRECT_BUFFER_BINDING = 37103 -GL_TEXTURE_2D_MULTISAMPLE = 37120 -GL_PROXY_TEXTURE_2D_MULTISAMPLE = 37121 -GL_TEXTURE_2D_MULTISAMPLE_ARRAY = 37122 -GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY = 37123 -GL_TEXTURE_BINDING_2D_MULTISAMPLE = 37124 -GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY = 37125 -GL_TEXTURE_SAMPLES = 37126 -GL_TEXTURE_FIXED_SAMPLE_LOCATIONS = 37127 -GL_SAMPLER_2D_MULTISAMPLE = 37128 -GL_INT_SAMPLER_2D_MULTISAMPLE = 37129 -GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE = 37130 -GL_SAMPLER_2D_MULTISAMPLE_ARRAY = 37131 -GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY = 37132 -GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY = 37133 -GL_MAX_COLOR_TEXTURE_SAMPLES = 37134 -GL_MAX_DEPTH_TEXTURE_SAMPLES = 37135 -GL_MAX_INTEGER_SAMPLES = 37136 -GL_MAX_SERVER_WAIT_TIMEOUT = 37137 -GL_OBJECT_TYPE = 37138 -GL_SYNC_CONDITION = 37139 -GL_SYNC_STATUS = 37140 -GL_SYNC_FLAGS = 37141 -GL_SYNC_FENCE = 37142 -GL_SYNC_GPU_COMMANDS_COMPLETE = 37143 -GL_UNSIGNALED = 37144 -GL_SIGNALED = 37145 -GL_ALREADY_SIGNALED = 37146 -GL_TIMEOUT_EXPIRED = 37147 -GL_CONDITION_SATISFIED = 37148 -GL_WAIT_FAILED = 37149 -GL_BUFFER_ACCESS_FLAGS = 37151 -GL_BUFFER_MAP_LENGTH = 37152 -GL_BUFFER_MAP_OFFSET = 37153 -GL_MAX_VERTEX_OUTPUT_COMPONENTS = 37154 -GL_MAX_GEOMETRY_INPUT_COMPONENTS = 37155 -GL_MAX_GEOMETRY_OUTPUT_COMPONENTS = 37156 -GL_MAX_FRAGMENT_INPUT_COMPONENTS = 37157 -GL_CONTEXT_PROFILE_MASK = 37158 
-GL_UNPACK_COMPRESSED_BLOCK_WIDTH = 37159 -GL_UNPACK_COMPRESSED_BLOCK_HEIGHT = 37160 -GL_UNPACK_COMPRESSED_BLOCK_DEPTH = 37161 -GL_UNPACK_COMPRESSED_BLOCK_SIZE = 37162 -GL_PACK_COMPRESSED_BLOCK_WIDTH = 37163 -GL_PACK_COMPRESSED_BLOCK_HEIGHT = 37164 -GL_PACK_COMPRESSED_BLOCK_DEPTH = 37165 -GL_PACK_COMPRESSED_BLOCK_SIZE = 37166 -GL_TEXTURE_IMMUTABLE_FORMAT = 37167 -GL_MAX_DEBUG_MESSAGE_LENGTH = 37187 -GL_MAX_DEBUG_LOGGED_MESSAGES = 37188 -GL_DEBUG_LOGGED_MESSAGES = 37189 -GL_DEBUG_SEVERITY_HIGH = 37190 -GL_DEBUG_SEVERITY_MEDIUM = 37191 -GL_DEBUG_SEVERITY_LOW = 37192 -GL_QUERY_BUFFER = 37266 -GL_QUERY_BUFFER_BINDING = 37267 -GL_QUERY_RESULT_NO_WAIT = 37268 -GL_TEXTURE_BUFFER_OFFSET = 37277 -GL_TEXTURE_BUFFER_SIZE = 37278 -GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT = 37279 -GL_COMPUTE_SHADER = 37305 -GL_MAX_COMPUTE_UNIFORM_BLOCKS = 37307 -GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS = 37308 -GL_MAX_COMPUTE_IMAGE_UNIFORMS = 37309 -GL_MAX_COMPUTE_WORK_GROUP_COUNT = 37310 -GL_MAX_COMPUTE_WORK_GROUP_SIZE = 37311 -GL_COMPRESSED_R11_EAC = 37488 -GL_COMPRESSED_SIGNED_R11_EAC = 37489 -GL_COMPRESSED_RG11_EAC = 37490 -GL_COMPRESSED_SIGNED_RG11_EAC = 37491 -GL_COMPRESSED_RGB8_ETC2 = 37492 -GL_COMPRESSED_SRGB8_ETC2 = 37493 -GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 37494 -GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 37495 -GL_COMPRESSED_RGBA8_ETC2_EAC = 37496 -GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC = 37497 -GL_ATOMIC_COUNTER_BUFFER = 37568 -GL_ATOMIC_COUNTER_BUFFER_BINDING = 37569 -GL_ATOMIC_COUNTER_BUFFER_START = 37570 -GL_ATOMIC_COUNTER_BUFFER_SIZE = 37571 -GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE = 37572 -GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS = 37573 -GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES = 37574 -GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER = 37575 -GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER = 37576 -GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER = 37577 -GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER = 37578 -GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER = 37579 -GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS = 37580 -GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS = 37581 -GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS = 37582 -GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS = 37583 -GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS = 37584 -GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS = 37585 -GL_MAX_VERTEX_ATOMIC_COUNTERS = 37586 -GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS = 37587 -GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS = 37588 -GL_MAX_GEOMETRY_ATOMIC_COUNTERS = 37589 -GL_MAX_FRAGMENT_ATOMIC_COUNTERS = 37590 -GL_MAX_COMBINED_ATOMIC_COUNTERS = 37591 -GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE = 37592 -GL_ACTIVE_ATOMIC_COUNTER_BUFFERS = 37593 -GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX = 37594 -GL_UNSIGNED_INT_ATOMIC_COUNTER = 37595 -GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS = 37596 -GL_DEBUG_OUTPUT = 37600 -GL_UNIFORM = 37601 -GL_UNIFORM_BLOCK = 37602 -GL_PROGRAM_INPUT = 37603 -GL_PROGRAM_OUTPUT = 37604 -GL_BUFFER_VARIABLE = 37605 -GL_SHADER_STORAGE_BLOCK = 37606 -GL_IS_PER_PATCH = 37607 -GL_VERTEX_SUBROUTINE = 37608 -GL_TESS_CONTROL_SUBROUTINE = 37609 -GL_TESS_EVALUATION_SUBROUTINE = 37610 -GL_GEOMETRY_SUBROUTINE = 37611 -GL_FRAGMENT_SUBROUTINE = 37612 -GL_COMPUTE_SUBROUTINE = 37613 -GL_VERTEX_SUBROUTINE_UNIFORM = 37614 -GL_TESS_CONTROL_SUBROUTINE_UNIFORM = 37615 -GL_TESS_EVALUATION_SUBROUTINE_UNIFORM = 37616 -GL_GEOMETRY_SUBROUTINE_UNIFORM = 37617 -GL_FRAGMENT_SUBROUTINE_UNIFORM = 37618 -GL_COMPUTE_SUBROUTINE_UNIFORM = 37619 -GL_TRANSFORM_FEEDBACK_VARYING = 37620 -GL_ACTIVE_RESOURCES = 37621 
-GL_MAX_NAME_LENGTH = 37622 -GL_MAX_NUM_ACTIVE_VARIABLES = 37623 -GL_MAX_NUM_COMPATIBLE_SUBROUTINES = 37624 -GL_NAME_LENGTH = 37625 -GL_TYPE = 37626 -GL_ARRAY_SIZE = 37627 -GL_OFFSET = 37628 -GL_BLOCK_INDEX = 37629 -GL_ARRAY_STRIDE = 37630 -GL_MATRIX_STRIDE = 37631 -GL_IS_ROW_MAJOR = 37632 -GL_ATOMIC_COUNTER_BUFFER_INDEX = 37633 -GL_BUFFER_BINDING = 37634 -GL_BUFFER_DATA_SIZE = 37635 -GL_NUM_ACTIVE_VARIABLES = 37636 -GL_ACTIVE_VARIABLES = 37637 -GL_REFERENCED_BY_VERTEX_SHADER = 37638 -GL_REFERENCED_BY_TESS_CONTROL_SHADER = 37639 -GL_REFERENCED_BY_TESS_EVALUATION_SHADER = 37640 -GL_REFERENCED_BY_GEOMETRY_SHADER = 37641 -GL_REFERENCED_BY_FRAGMENT_SHADER = 37642 -GL_REFERENCED_BY_COMPUTE_SHADER = 37643 -GL_TOP_LEVEL_ARRAY_SIZE = 37644 -GL_TOP_LEVEL_ARRAY_STRIDE = 37645 -GL_LOCATION = 37646 -GL_LOCATION_INDEX = 37647 -GL_FRAMEBUFFER_DEFAULT_WIDTH = 37648 -GL_FRAMEBUFFER_DEFAULT_HEIGHT = 37649 -GL_FRAMEBUFFER_DEFAULT_LAYERS = 37650 -GL_FRAMEBUFFER_DEFAULT_SAMPLES = 37651 -GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS = 37652 -GL_MAX_FRAMEBUFFER_WIDTH = 37653 -GL_MAX_FRAMEBUFFER_HEIGHT = 37654 -GL_MAX_FRAMEBUFFER_LAYERS = 37655 -GL_MAX_FRAMEBUFFER_SAMPLES = 37656 -GL_LOCATION_COMPONENT = 37706 -GL_TRANSFORM_FEEDBACK_BUFFER_INDEX = 37707 -GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE = 37708 -GL_CLIP_ORIGIN = 37724 -GL_CLIP_DEPTH_MODE = 37725 -GL_NEGATIVE_ONE_TO_ONE = 37726 -GL_ZERO_TO_ONE = 37727 -GL_CLEAR_TEXTURE = 37733 -GL_NUM_SAMPLE_COUNTS = 37760 -GL_SHADER_BINARY_FORMAT_SPIR_V = 38225 -GL_SPIR_V_BINARY = 38226 -GL_SPIR_V_EXTENSIONS = 38227 -GL_NUM_SPIR_V_EXTENSIONS = 38228 -GL_EVAL_BIT = 65536 -GL_LIST_BIT = 131072 -GL_TEXTURE_BIT = 262144 -GL_SCISSOR_BIT = 524288 -GL_MULTISAMPLE_BIT = 536870912 -GL_MULTISAMPLE_BIT_ARB = 536870912 -GL_ALL_ATTRIB_BITS = 4294967295 -GL_CLIENT_ALL_ATTRIB_BITS = 4294967295 -GL_INVALID_INDEX = 4294967295 -GL_ALL_SHADER_BITS = 4294967295 -GL_ALL_BARRIER_BITS = 4294967295 -GL_TIMEOUT_IGNORED = 18446744073709551615 - -# GL command definitions -glAccum = _link_function('glAccum', None, [GLenum, GLfloat], requires='OpenGL 1.0') -glActiveShaderProgram = _link_function('glActiveShaderProgram', None, [GLuint, GLuint], requires='OpenGL 4.1') -glActiveTexture = _link_function('glActiveTexture', None, [GLenum], requires='OpenGL 1.3') -glAlphaFunc = _link_function('glAlphaFunc', None, [GLenum, GLfloat], requires='OpenGL 1.0') -glAreTexturesResident = _link_function('glAreTexturesResident', GLboolean, [GLsizei, POINTER(GLuint), POINTER(GLboolean)], requires='OpenGL 1.1') -glArrayElement = _link_function('glArrayElement', None, [GLint], requires='OpenGL 1.1') -glAttachShader = _link_function('glAttachShader', None, [GLuint, GLuint], requires='OpenGL 2.0') -glBegin = _link_function('glBegin', None, [GLenum], requires='OpenGL 1.0') -glBeginConditionalRender = _link_function('glBeginConditionalRender', None, [GLuint, GLenum], requires='OpenGL 3.0') -glBeginQuery = _link_function('glBeginQuery', None, [GLenum, GLuint], requires='OpenGL 1.5') -glBeginQueryIndexed = _link_function('glBeginQueryIndexed', None, [GLenum, GLuint, GLuint], requires='OpenGL 4.0') -glBeginTransformFeedback = _link_function('glBeginTransformFeedback', None, [GLenum], requires='OpenGL 3.0') -glBindAttribLocation = _link_function('glBindAttribLocation', None, [GLuint, GLuint, POINTER(GLchar)], requires='OpenGL 2.0') -glBindBuffer = _link_function('glBindBuffer', None, [GLenum, GLuint], requires='OpenGL 1.5') -glBindBufferBase = _link_function('glBindBufferBase', None, [GLenum, GLuint, GLuint], requires='OpenGL 3.1') 
-glBindBufferRange = _link_function('glBindBufferRange', None, [GLenum, GLuint, GLuint, GLintptr, GLsizeiptr], requires='OpenGL 3.1') -glBindBuffersBase = _link_function('glBindBuffersBase', None, [GLenum, GLuint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.4') -glBindBuffersRange = _link_function('glBindBuffersRange', None, [GLenum, GLuint, GLsizei, POINTER(GLuint), POINTER(GLintptr), POINTER(GLsizeiptr)], requires='OpenGL 4.4') -glBindFragDataLocation = _link_function('glBindFragDataLocation', None, [GLuint, GLuint, POINTER(GLchar)], requires='OpenGL 3.0') -glBindFragDataLocationIndexed = _link_function('glBindFragDataLocationIndexed', None, [GLuint, GLuint, GLuint, POINTER(GLchar)], requires='OpenGL 3.3') -glBindFramebuffer = _link_function('glBindFramebuffer', None, [GLenum, GLuint], requires='OpenGL 3.0') -glBindFramebufferEXT = _link_function('glBindFramebufferEXT', None, [GLenum, GLuint], requires='None') -glBindImageTexture = _link_function('glBindImageTexture', None, [GLuint, GLuint, GLint, GLboolean, GLint, GLenum, GLenum], requires='OpenGL 4.2') -glBindImageTextures = _link_function('glBindImageTextures', None, [GLuint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.4') -glBindProgramPipeline = _link_function('glBindProgramPipeline', None, [GLuint], requires='OpenGL 4.1') -glBindRenderbuffer = _link_function('glBindRenderbuffer', None, [GLenum, GLuint], requires='OpenGL 3.0') -glBindRenderbufferEXT = _link_function('glBindRenderbufferEXT', None, [GLenum, GLuint], requires='None') -glBindSampler = _link_function('glBindSampler', None, [GLuint, GLuint], requires='OpenGL 3.3') -glBindSamplers = _link_function('glBindSamplers', None, [GLuint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.4') -glBindTexture = _link_function('glBindTexture', None, [GLenum, GLuint], requires='OpenGL 1.1') -glBindTextureUnit = _link_function('glBindTextureUnit', None, [GLuint, GLuint], requires='OpenGL 4.5') -glBindTextures = _link_function('glBindTextures', None, [GLuint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.4') -glBindTransformFeedback = _link_function('glBindTransformFeedback', None, [GLenum, GLuint], requires='OpenGL 4.0') -glBindVertexArray = _link_function('glBindVertexArray', None, [GLuint], requires='OpenGL 3.0') -glBindVertexBuffer = _link_function('glBindVertexBuffer', None, [GLuint, GLuint, GLintptr, GLsizei], requires='OpenGL 4.3') -glBindVertexBuffers = _link_function('glBindVertexBuffers', None, [GLuint, GLsizei, POINTER(GLuint), POINTER(GLintptr), POINTER(GLsizei)], requires='OpenGL 4.4') -glBitmap = _link_function('glBitmap', None, [GLsizei, GLsizei, GLfloat, GLfloat, GLfloat, GLfloat, POINTER(GLubyte)], requires='OpenGL 1.0') -glBlendColor = _link_function('glBlendColor', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.4') -glBlendEquation = _link_function('glBlendEquation', None, [GLenum], requires='OpenGL 1.4') -glBlendEquationSeparate = _link_function('glBlendEquationSeparate', None, [GLenum, GLenum], requires='OpenGL 2.0') -glBlendEquationSeparatei = _link_function('glBlendEquationSeparatei', None, [GLuint, GLenum, GLenum], requires='OpenGL 4.0') -glBlendEquationi = _link_function('glBlendEquationi', None, [GLuint, GLenum], requires='OpenGL 4.0') -glBlendFunc = _link_function('glBlendFunc', None, [GLenum, GLenum], requires='OpenGL 1.0') -glBlendFuncSeparate = _link_function('glBlendFuncSeparate', None, [GLenum, GLenum, GLenum, GLenum], requires='OpenGL 1.4') -glBlendFuncSeparatei = _link_function('glBlendFuncSeparatei', None, [GLuint, GLenum, GLenum, GLenum, 
GLenum], requires='OpenGL 4.0') -glBlendFunci = _link_function('glBlendFunci', None, [GLuint, GLenum, GLenum], requires='OpenGL 4.0') -glBlitFramebuffer = _link_function('glBlitFramebuffer', None, [GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLbitfield, GLenum], requires='OpenGL 3.0') -glBlitNamedFramebuffer = _link_function('glBlitNamedFramebuffer', None, [GLuint, GLuint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLbitfield, GLenum], requires='OpenGL 4.5') -glBufferData = _link_function('glBufferData', None, [GLenum, GLsizeiptr, POINTER(GLvoid), GLenum], requires='OpenGL 1.5') -glBufferStorage = _link_function('glBufferStorage', None, [GLenum, GLsizeiptr, POINTER(GLvoid), GLbitfield], requires='OpenGL 4.4') -glBufferSubData = _link_function('glBufferSubData', None, [GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)], requires='OpenGL 1.5') -glCallList = _link_function('glCallList', None, [GLuint], requires='OpenGL 1.0') -glCallLists = _link_function('glCallLists', None, [GLsizei, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0') -glCheckFramebufferStatus = _link_function('glCheckFramebufferStatus', GLenum, [GLenum], requires='OpenGL 3.0') -glCheckFramebufferStatusEXT = _link_function('glCheckFramebufferStatusEXT', GLenum, [GLenum], requires='None') -glCheckNamedFramebufferStatus = _link_function('glCheckNamedFramebufferStatus', GLenum, [GLuint, GLenum], requires='OpenGL 4.5') -glClampColor = _link_function('glClampColor', None, [GLenum, GLenum], requires='OpenGL 3.0') -glClear = _link_function('glClear', None, [GLbitfield], requires='OpenGL 1.0') -glClearAccum = _link_function('glClearAccum', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glClearBufferData = _link_function('glClearBufferData', None, [GLenum, GLenum, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.3') -glClearBufferSubData = _link_function('glClearBufferSubData', None, [GLenum, GLenum, GLintptr, GLsizeiptr, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.3') -glClearBufferfi = _link_function('glClearBufferfi', None, [GLenum, GLint, GLfloat, GLint], requires='OpenGL 3.0') -glClearBufferfv = _link_function('glClearBufferfv', None, [GLenum, GLint, POINTER(GLfloat)], requires='OpenGL 3.0') -glClearBufferiv = _link_function('glClearBufferiv', None, [GLenum, GLint, POINTER(GLint)], requires='OpenGL 3.0') -glClearBufferuiv = _link_function('glClearBufferuiv', None, [GLenum, GLint, POINTER(GLuint)], requires='OpenGL 3.0') -glClearColor = _link_function('glClearColor', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glClearDepth = _link_function('glClearDepth', None, [GLdouble], requires='OpenGL 1.0') -glClearDepthf = _link_function('glClearDepthf', None, [GLfloat], requires='OpenGL 4.1') -glClearIndex = _link_function('glClearIndex', None, [GLfloat], requires='OpenGL 1.0') -glClearNamedBufferData = _link_function('glClearNamedBufferData', None, [GLuint, GLenum, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5') -glClearNamedBufferSubData = _link_function('glClearNamedBufferSubData', None, [GLuint, GLenum, GLintptr, GLsizeiptr, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5') -glClearNamedFramebufferfi = _link_function('glClearNamedFramebufferfi', None, [GLuint, GLenum, GLint, GLfloat, GLint], requires='OpenGL 4.5') -glClearNamedFramebufferfv = _link_function('glClearNamedFramebufferfv', None, [GLuint, GLenum, GLint, POINTER(GLfloat)], requires='OpenGL 4.5') -glClearNamedFramebufferiv = _link_function('glClearNamedFramebufferiv', None, [GLuint, 
GLenum, GLint, POINTER(GLint)], requires='OpenGL 4.5') -glClearNamedFramebufferuiv = _link_function('glClearNamedFramebufferuiv', None, [GLuint, GLenum, GLint, POINTER(GLuint)], requires='OpenGL 4.5') -glClearStencil = _link_function('glClearStencil', None, [GLint], requires='OpenGL 1.0') -glClearTexImage = _link_function('glClearTexImage', None, [GLuint, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.4') -glClearTexSubImage = _link_function('glClearTexSubImage', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.4') -glClientActiveTexture = _link_function('glClientActiveTexture', None, [GLenum], requires='OpenGL 1.3') -glClientWaitSync = _link_function('glClientWaitSync', GLenum, [GLsync, GLbitfield, GLuint64], requires='OpenGL 3.2') -glClipControl = _link_function('glClipControl', None, [GLenum, GLenum], requires='OpenGL 4.5') -glClipPlane = _link_function('glClipPlane', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.0') -glColor3b = _link_function('glColor3b', None, [GLbyte, GLbyte, GLbyte], requires='OpenGL 1.0') -glColor3bv = _link_function('glColor3bv', None, [POINTER(GLbyte)], requires='OpenGL 1.0') -glColor3d = _link_function('glColor3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glColor3dv = _link_function('glColor3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glColor3f = _link_function('glColor3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glColor3fv = _link_function('glColor3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glColor3i = _link_function('glColor3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0') -glColor3iv = _link_function('glColor3iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glColor3s = _link_function('glColor3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glColor3sv = _link_function('glColor3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glColor3ub = _link_function('glColor3ub', None, [GLubyte, GLubyte, GLubyte], requires='OpenGL 1.0') -glColor3ubv = _link_function('glColor3ubv', None, [POINTER(GLubyte)], requires='OpenGL 1.0') -glColor3ui = _link_function('glColor3ui', None, [GLuint, GLuint, GLuint], requires='OpenGL 1.0') -glColor3uiv = _link_function('glColor3uiv', None, [POINTER(GLuint)], requires='OpenGL 1.0') -glColor3us = _link_function('glColor3us', None, [GLushort, GLushort, GLushort], requires='OpenGL 1.0') -glColor3usv = _link_function('glColor3usv', None, [POINTER(GLushort)], requires='OpenGL 1.0') -glColor4b = _link_function('glColor4b', None, [GLbyte, GLbyte, GLbyte, GLbyte], requires='OpenGL 1.0') -glColor4bv = _link_function('glColor4bv', None, [POINTER(GLbyte)], requires='OpenGL 1.0') -glColor4d = _link_function('glColor4d', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glColor4dv = _link_function('glColor4dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glColor4f = _link_function('glColor4f', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glColor4fv = _link_function('glColor4fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glColor4i = _link_function('glColor4i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0') -glColor4iv = _link_function('glColor4iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glColor4s = _link_function('glColor4s', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glColor4sv = _link_function('glColor4sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') 
-glColor4ub = _link_function('glColor4ub', None, [GLubyte, GLubyte, GLubyte, GLubyte], requires='OpenGL 1.0') -glColor4ubv = _link_function('glColor4ubv', None, [POINTER(GLubyte)], requires='OpenGL 1.0') -glColor4ui = _link_function('glColor4ui', None, [GLuint, GLuint, GLuint, GLuint], requires='OpenGL 1.0') -glColor4uiv = _link_function('glColor4uiv', None, [POINTER(GLuint)], requires='OpenGL 1.0') -glColor4us = _link_function('glColor4us', None, [GLushort, GLushort, GLushort, GLushort], requires='OpenGL 1.0') -glColor4usv = _link_function('glColor4usv', None, [POINTER(GLushort)], requires='OpenGL 1.0') -glColorMask = _link_function('glColorMask', None, [GLboolean, GLboolean, GLboolean, GLboolean], requires='OpenGL 1.0') -glColorMaski = _link_function('glColorMaski', None, [GLuint, GLboolean, GLboolean, GLboolean, GLboolean], requires='OpenGL 3.0') -glColorMaterial = _link_function('glColorMaterial', None, [GLenum, GLenum], requires='OpenGL 1.0') -glColorP3ui = _link_function('glColorP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glColorP3uiv = _link_function('glColorP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glColorP4ui = _link_function('glColorP4ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glColorP4uiv = _link_function('glColorP4uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glColorPointer = _link_function('glColorPointer', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1') -glCompileShader = _link_function('glCompileShader', None, [GLuint], requires='OpenGL 2.0') -glCompressedTexImage1D = _link_function('glCompressedTexImage1D', None, [GLenum, GLint, GLenum, GLsizei, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3') -glCompressedTexImage2D = _link_function('glCompressedTexImage2D', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3') -glCompressedTexImage3D = _link_function('glCompressedTexImage3D', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3') -glCompressedTexSubImage1D = _link_function('glCompressedTexSubImage1D', None, [GLenum, GLint, GLint, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3') -glCompressedTexSubImage2D = _link_function('glCompressedTexSubImage2D', None, [GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3') -glCompressedTexSubImage3D = _link_function('glCompressedTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3') -glCompressedTextureSubImage1D = _link_function('glCompressedTextureSubImage1D', None, [GLuint, GLint, GLint, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glCompressedTextureSubImage2D = _link_function('glCompressedTextureSubImage2D', None, [GLuint, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glCompressedTextureSubImage3D = _link_function('glCompressedTextureSubImage3D', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glCopyBufferSubData = _link_function('glCopyBufferSubData', None, [GLenum, GLenum, GLintptr, GLintptr, GLsizeiptr], requires='OpenGL 3.1') -glCopyImageSubData = _link_function('glCopyImageSubData', None, [GLuint, GLenum, GLint, GLint, GLint, GLint, GLuint, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei], 
requires='OpenGL 4.3') -glCopyNamedBufferSubData = _link_function('glCopyNamedBufferSubData', None, [GLuint, GLuint, GLintptr, GLintptr, GLsizeiptr], requires='OpenGL 4.5') -glCopyPixels = _link_function('glCopyPixels', None, [GLint, GLint, GLsizei, GLsizei, GLenum], requires='OpenGL 1.0') -glCopyTexImage1D = _link_function('glCopyTexImage1D', None, [GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLint], requires='OpenGL 1.1') -glCopyTexImage2D = _link_function('glCopyTexImage2D', None, [GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLsizei, GLint], requires='OpenGL 1.1') -glCopyTexSubImage1D = _link_function('glCopyTexSubImage1D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei], requires='OpenGL 1.1') -glCopyTexSubImage2D = _link_function('glCopyTexSubImage2D', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 1.1') -glCopyTexSubImage3D = _link_function('glCopyTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 1.2') -glCopyTextureSubImage1D = _link_function('glCopyTextureSubImage1D', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei], requires='OpenGL 4.5') -glCopyTextureSubImage2D = _link_function('glCopyTextureSubImage2D', None, [GLuint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.5') -glCopyTextureSubImage3D = _link_function('glCopyTextureSubImage3D', None, [GLuint, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.5') -glCreateBuffers = _link_function('glCreateBuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glCreateFramebuffers = _link_function('glCreateFramebuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glCreateProgram = _link_function('glCreateProgram', GLuint, [], requires='OpenGL 2.0') -glCreateProgramPipelines = _link_function('glCreateProgramPipelines', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glCreateQueries = _link_function('glCreateQueries', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glCreateRenderbuffers = _link_function('glCreateRenderbuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glCreateSamplers = _link_function('glCreateSamplers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glCreateShader = _link_function('glCreateShader', GLuint, [GLenum], requires='OpenGL 2.0') -glCreateShaderProgramv = _link_function('glCreateShaderProgramv', GLuint, [GLenum, GLsizei, POINTER(POINTER(GLchar))], requires='OpenGL 4.1') -glCreateTextures = _link_function('glCreateTextures', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glCreateTransformFeedbacks = _link_function('glCreateTransformFeedbacks', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glCreateVertexArrays = _link_function('glCreateVertexArrays', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glCullFace = _link_function('glCullFace', None, [GLenum], requires='OpenGL 1.0') -glDebugMessageCallback = _link_function('glDebugMessageCallback', None, [GLDEBUGPROC, POINTER(GLvoid)], requires='OpenGL 4.3') -glDebugMessageControl = _link_function('glDebugMessageControl', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLuint), GLboolean], requires='OpenGL 4.3') -glDebugMessageInsert = _link_function('glDebugMessageInsert', None, [GLenum, GLenum, GLuint, GLenum, GLsizei, POINTER(GLchar)], requires='OpenGL 4.3') -glDeleteBuffers = _link_function('glDeleteBuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.5') 
-glDeleteFramebuffers = _link_function('glDeleteFramebuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glDeleteFramebuffersEXT = _link_function('glDeleteFramebuffersEXT', None, [GLsizei, POINTER(GLuint)], requires='None') -glDeleteLists = _link_function('glDeleteLists', None, [GLuint, GLsizei], requires='OpenGL 1.0') -glDeleteProgram = _link_function('glDeleteProgram', None, [GLuint], requires='OpenGL 2.0') -glDeleteProgramPipelines = _link_function('glDeleteProgramPipelines', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.1') -glDeleteQueries = _link_function('glDeleteQueries', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.5') -glDeleteRenderbuffers = _link_function('glDeleteRenderbuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glDeleteRenderbuffersEXT = _link_function('glDeleteRenderbuffersEXT', None, [GLsizei, POINTER(GLuint)], requires='None') -glDeleteSamplers = _link_function('glDeleteSamplers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.3') -glDeleteShader = _link_function('glDeleteShader', None, [GLuint], requires='OpenGL 2.0') -glDeleteSync = _link_function('glDeleteSync', None, [GLsync], requires='OpenGL 3.2') -glDeleteTextures = _link_function('glDeleteTextures', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.1') -glDeleteTransformFeedbacks = _link_function('glDeleteTransformFeedbacks', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.0') -glDeleteVertexArrays = _link_function('glDeleteVertexArrays', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glDepthFunc = _link_function('glDepthFunc', None, [GLenum], requires='OpenGL 1.0') -glDepthMask = _link_function('glDepthMask', None, [GLboolean], requires='OpenGL 1.0') -glDepthRange = _link_function('glDepthRange', None, [GLdouble, GLdouble], requires='OpenGL 1.0') -glDepthRangeArrayv = _link_function('glDepthRangeArrayv', None, [GLuint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1') -glDepthRangeIndexed = _link_function('glDepthRangeIndexed', None, [GLuint, GLdouble, GLdouble], requires='OpenGL 4.1') -glDepthRangef = _link_function('glDepthRangef', None, [GLfloat, GLfloat], requires='OpenGL 4.1') -glDetachShader = _link_function('glDetachShader', None, [GLuint, GLuint], requires='OpenGL 2.0') -glDisable = _link_function('glDisable', None, [GLenum], requires='OpenGL 1.0') -glDisableClientState = _link_function('glDisableClientState', None, [GLenum], requires='OpenGL 1.1') -glDisableVertexArrayAttrib = _link_function('glDisableVertexArrayAttrib', None, [GLuint, GLuint], requires='OpenGL 4.5') -glDisableVertexAttribArray = _link_function('glDisableVertexAttribArray', None, [GLuint], requires='OpenGL 2.0') -glDisablei = _link_function('glDisablei', None, [GLenum, GLuint], requires='OpenGL 3.0') -glDispatchCompute = _link_function('glDispatchCompute', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.3') -glDispatchComputeIndirect = _link_function('glDispatchComputeIndirect', None, [GLintptr], requires='OpenGL 4.3') -glDrawArrays = _link_function('glDrawArrays', None, [GLenum, GLint, GLsizei], requires='OpenGL 1.1') -glDrawArraysIndirect = _link_function('glDrawArraysIndirect', None, [GLenum, POINTER(GLvoid)], requires='OpenGL 4.0') -glDrawArraysInstanced = _link_function('glDrawArraysInstanced', None, [GLenum, GLint, GLsizei, GLsizei], requires='OpenGL 3.1') -glDrawArraysInstancedBaseInstance = _link_function('glDrawArraysInstancedBaseInstance', None, [GLenum, GLint, GLsizei, GLsizei, GLuint], requires='OpenGL 4.2') -glDrawBuffer = 
_link_function('glDrawBuffer', None, [GLenum], requires='OpenGL 1.0') -glDrawBuffers = _link_function('glDrawBuffers', None, [GLsizei, POINTER(GLenum)], requires='OpenGL 2.0') -glDrawElements = _link_function('glDrawElements', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid)], requires='OpenGL 1.1') -glDrawElementsBaseVertex = _link_function('glDrawElementsBaseVertex', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLint], requires='OpenGL 3.2') -glDrawElementsIndirect = _link_function('glDrawElementsIndirect', None, [GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.0') -glDrawElementsInstanced = _link_function('glDrawElementsInstanced', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLsizei], requires='OpenGL 3.1') -glDrawElementsInstancedBaseInstance = _link_function('glDrawElementsInstancedBaseInstance', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLsizei, GLuint], requires='OpenGL 4.2') -glDrawElementsInstancedBaseVertex = _link_function('glDrawElementsInstancedBaseVertex', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLsizei, GLint], requires='OpenGL 3.2') -glDrawElementsInstancedBaseVertexBaseInstance = _link_function('glDrawElementsInstancedBaseVertexBaseInstance', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLsizei, GLint, GLuint], requires='OpenGL 4.2') -glDrawPixels = _link_function('glDrawPixels', None, [GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0') -glDrawRangeElements = _link_function('glDrawRangeElements', None, [GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid)], requires='OpenGL 1.2') -glDrawRangeElementsBaseVertex = _link_function('glDrawRangeElementsBaseVertex', None, [GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid), GLint], requires='OpenGL 3.2') -glDrawTransformFeedback = _link_function('glDrawTransformFeedback', None, [GLenum, GLuint], requires='OpenGL 4.0') -glDrawTransformFeedbackInstanced = _link_function('glDrawTransformFeedbackInstanced', None, [GLenum, GLuint, GLsizei], requires='OpenGL 4.2') -glDrawTransformFeedbackStream = _link_function('glDrawTransformFeedbackStream', None, [GLenum, GLuint, GLuint], requires='OpenGL 4.0') -glDrawTransformFeedbackStreamInstanced = _link_function('glDrawTransformFeedbackStreamInstanced', None, [GLenum, GLuint, GLuint, GLsizei], requires='OpenGL 4.2') -glEdgeFlag = _link_function('glEdgeFlag', None, [GLboolean], requires='OpenGL 1.0') -glEdgeFlagPointer = _link_function('glEdgeFlagPointer', None, [GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1') -glEdgeFlagv = _link_function('glEdgeFlagv', None, [POINTER(GLboolean)], requires='OpenGL 1.0') -glEnable = _link_function('glEnable', None, [GLenum], requires='OpenGL 1.0') -glEnableClientState = _link_function('glEnableClientState', None, [GLenum], requires='OpenGL 1.1') -glEnableVertexArrayAttrib = _link_function('glEnableVertexArrayAttrib', None, [GLuint, GLuint], requires='OpenGL 4.5') -glEnableVertexAttribArray = _link_function('glEnableVertexAttribArray', None, [GLuint], requires='OpenGL 2.0') -glEnablei = _link_function('glEnablei', None, [GLenum, GLuint], requires='OpenGL 3.0') -glEnd = _link_function('glEnd', None, [], requires='OpenGL 1.0') -glEndConditionalRender = _link_function('glEndConditionalRender', None, [], requires='OpenGL 3.0') -glEndList = _link_function('glEndList', None, [], requires='OpenGL 1.0') -glEndQuery = _link_function('glEndQuery', None, [GLenum], requires='OpenGL 1.5') -glEndQueryIndexed = _link_function('glEndQueryIndexed', None, [GLenum, GLuint], requires='OpenGL 4.0') 
-glEndTransformFeedback = _link_function('glEndTransformFeedback', None, [], requires='OpenGL 3.0') -glEvalCoord1d = _link_function('glEvalCoord1d', None, [GLdouble], requires='OpenGL 1.0') -glEvalCoord1dv = _link_function('glEvalCoord1dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glEvalCoord1f = _link_function('glEvalCoord1f', None, [GLfloat], requires='OpenGL 1.0') -glEvalCoord1fv = _link_function('glEvalCoord1fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glEvalCoord2d = _link_function('glEvalCoord2d', None, [GLdouble, GLdouble], requires='OpenGL 1.0') -glEvalCoord2dv = _link_function('glEvalCoord2dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glEvalCoord2f = _link_function('glEvalCoord2f', None, [GLfloat, GLfloat], requires='OpenGL 1.0') -glEvalCoord2fv = _link_function('glEvalCoord2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glEvalMesh1 = _link_function('glEvalMesh1', None, [GLenum, GLint, GLint], requires='OpenGL 1.0') -glEvalMesh2 = _link_function('glEvalMesh2', None, [GLenum, GLint, GLint, GLint, GLint], requires='OpenGL 1.0') -glEvalPoint1 = _link_function('glEvalPoint1', None, [GLint], requires='OpenGL 1.0') -glEvalPoint2 = _link_function('glEvalPoint2', None, [GLint, GLint], requires='OpenGL 1.0') -glFeedbackBuffer = _link_function('glFeedbackBuffer', None, [GLsizei, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glFenceSync = _link_function('glFenceSync', GLsync, [GLenum, GLbitfield], requires='OpenGL 3.2') -glFinish = _link_function('glFinish', None, [], requires='OpenGL 1.0') -glFlush = _link_function('glFlush', None, [], requires='OpenGL 1.0') -glFlushMappedBufferRange = _link_function('glFlushMappedBufferRange', None, [GLenum, GLintptr, GLsizeiptr], requires='OpenGL 3.0') -glFlushMappedNamedBufferRange = _link_function('glFlushMappedNamedBufferRange', None, [GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.5') -glFogCoordPointer = _link_function('glFogCoordPointer', None, [GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.4') -glFogCoordd = _link_function('glFogCoordd', None, [GLdouble], requires='OpenGL 1.4') -glFogCoorddv = _link_function('glFogCoorddv', None, [POINTER(GLdouble)], requires='OpenGL 1.4') -glFogCoordf = _link_function('glFogCoordf', None, [GLfloat], requires='OpenGL 1.4') -glFogCoordfv = _link_function('glFogCoordfv', None, [POINTER(GLfloat)], requires='OpenGL 1.4') -glFogf = _link_function('glFogf', None, [GLenum, GLfloat], requires='OpenGL 1.0') -glFogfv = _link_function('glFogfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glFogi = _link_function('glFogi', None, [GLenum, GLint], requires='OpenGL 1.0') -glFogiv = _link_function('glFogiv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glFramebufferParameteri = _link_function('glFramebufferParameteri', None, [GLenum, GLenum, GLint], requires='OpenGL 4.3') -glFramebufferRenderbuffer = _link_function('glFramebufferRenderbuffer', None, [GLenum, GLenum, GLenum, GLuint], requires='OpenGL 3.0') -glFramebufferRenderbufferEXT = _link_function('glFramebufferRenderbufferEXT', None, [GLenum, GLenum, GLenum, GLuint], requires='None') -glFramebufferTexture = _link_function('glFramebufferTexture', None, [GLenum, GLenum, GLuint, GLint], requires='OpenGL 3.2') -glFramebufferTexture1D = _link_function('glFramebufferTexture1D', None, [GLenum, GLenum, GLenum, GLuint, GLint], requires='OpenGL 3.0') -glFramebufferTexture1DEXT = _link_function('glFramebufferTexture1DEXT', None, [GLenum, GLenum, GLenum, GLuint, GLint], requires='None') -glFramebufferTexture2D 
= _link_function('glFramebufferTexture2D', None, [GLenum, GLenum, GLenum, GLuint, GLint], requires='OpenGL 3.0') -glFramebufferTexture2DEXT = _link_function('glFramebufferTexture2DEXT', None, [GLenum, GLenum, GLenum, GLuint, GLint], requires='None') -glFramebufferTexture3D = _link_function('glFramebufferTexture3D', None, [GLenum, GLenum, GLenum, GLuint, GLint, GLint], requires='OpenGL 3.0') -glFramebufferTexture3DEXT = _link_function('glFramebufferTexture3DEXT', None, [GLenum, GLenum, GLenum, GLuint, GLint, GLint], requires='None') -glFramebufferTextureLayer = _link_function('glFramebufferTextureLayer', None, [GLenum, GLenum, GLuint, GLint, GLint], requires='OpenGL 3.0') -glFrontFace = _link_function('glFrontFace', None, [GLenum], requires='OpenGL 1.0') -glFrustum = _link_function('glFrustum', None, [GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glGenBuffers = _link_function('glGenBuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.5') -glGenFramebuffers = _link_function('glGenFramebuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glGenFramebuffersEXT = _link_function('glGenFramebuffersEXT', None, [GLsizei, POINTER(GLuint)], requires='None') -glGenLists = _link_function('glGenLists', GLuint, [GLsizei], requires='OpenGL 1.0') -glGenProgramPipelines = _link_function('glGenProgramPipelines', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.1') -glGenQueries = _link_function('glGenQueries', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.5') -glGenRenderbuffers = _link_function('glGenRenderbuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glGenRenderbuffersEXT = _link_function('glGenRenderbuffersEXT', None, [GLsizei, POINTER(GLuint)], requires='None') -glGenSamplers = _link_function('glGenSamplers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.3') -glGenTextures = _link_function('glGenTextures', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.1') -glGenTransformFeedbacks = _link_function('glGenTransformFeedbacks', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.0') -glGenVertexArrays = _link_function('glGenVertexArrays', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glGenerateMipmap = _link_function('glGenerateMipmap', None, [GLenum], requires='OpenGL 3.0') -glGenerateMipmapEXT = _link_function('glGenerateMipmapEXT', None, [GLenum], requires='None') -glGenerateTextureMipmap = _link_function('glGenerateTextureMipmap', None, [GLuint], requires='OpenGL 4.5') -glGetActiveAtomicCounterBufferiv = _link_function('glGetActiveAtomicCounterBufferiv', None, [GLuint, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.2') -glGetActiveAttrib = _link_function('glGetActiveAttrib', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)], requires='OpenGL 2.0') -glGetActiveSubroutineName = _link_function('glGetActiveSubroutineName', None, [GLuint, GLenum, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.0') -glGetActiveSubroutineUniformName = _link_function('glGetActiveSubroutineUniformName', None, [GLuint, GLenum, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.0') -glGetActiveSubroutineUniformiv = _link_function('glGetActiveSubroutineUniformiv', None, [GLuint, GLenum, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.0') -glGetActiveUniform = _link_function('glGetActiveUniform', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)], requires='OpenGL 
2.0') -glGetActiveUniformBlockName = _link_function('glGetActiveUniformBlockName', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 3.1') -glGetActiveUniformBlockiv = _link_function('glGetActiveUniformBlockiv', None, [GLuint, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.1') -glGetActiveUniformName = _link_function('glGetActiveUniformName', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 3.1') -glGetActiveUniformsiv = _link_function('glGetActiveUniformsiv', None, [GLuint, GLsizei, POINTER(GLuint), GLenum, POINTER(GLint)], requires='OpenGL 3.1') -glGetAttachedShaders = _link_function('glGetAttachedShaders', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLuint)], requires='OpenGL 2.0') -glGetAttribLocation = _link_function('glGetAttribLocation', GLint, [GLuint, POINTER(GLchar)], requires='OpenGL 2.0') -glGetBooleani_v = _link_function('glGetBooleani_v', None, [GLenum, GLuint, POINTER(GLboolean)], requires='OpenGL 3.0') -glGetBooleanv = _link_function('glGetBooleanv', None, [GLenum, POINTER(GLboolean)], requires='OpenGL 1.0') -glGetBufferParameteri64v = _link_function('glGetBufferParameteri64v', None, [GLenum, GLenum, POINTER(GLint64)], requires='OpenGL 3.2') -glGetBufferParameteriv = _link_function('glGetBufferParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.5') -glGetBufferPointerv = _link_function('glGetBufferPointerv', None, [GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.5') -glGetBufferSubData = _link_function('glGetBufferSubData', None, [GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)], requires='OpenGL 1.5') -glGetClipPlane = _link_function('glGetClipPlane', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.0') -glGetCompressedTexImage = _link_function('glGetCompressedTexImage', None, [GLenum, GLint, POINTER(GLvoid)], requires='OpenGL 1.3') -glGetCompressedTextureImage = _link_function('glGetCompressedTextureImage', None, [GLuint, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetCompressedTextureSubImage = _link_function('glGetCompressedTextureSubImage', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetDebugMessageLog = _link_function('glGetDebugMessageLog', GLuint, [GLuint, GLsizei, POINTER(GLenum), POINTER(GLenum), POINTER(GLuint), POINTER(GLenum), POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.3') -glGetDoublei_v = _link_function('glGetDoublei_v', None, [GLenum, GLuint, POINTER(GLdouble)], requires='OpenGL 4.1') -glGetDoublev = _link_function('glGetDoublev', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.0') -glGetError = _link_function('glGetError', GLenum, [], requires='OpenGL 1.0') -glGetFloati_v = _link_function('glGetFloati_v', None, [GLenum, GLuint, POINTER(GLfloat)], requires='OpenGL 4.1') -glGetFloatv = _link_function('glGetFloatv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glGetFragDataIndex = _link_function('glGetFragDataIndex', GLint, [GLuint, POINTER(GLchar)], requires='OpenGL 3.3') -glGetFragDataLocation = _link_function('glGetFragDataLocation', GLint, [GLuint, POINTER(GLchar)], requires='OpenGL 3.0') -glGetFramebufferAttachmentParameteriv = _link_function('glGetFramebufferAttachmentParameteriv', None, [GLenum, GLenum, GLenum, POINTER(GLint)], requires='OpenGL 3.0') -glGetFramebufferAttachmentParameterivEXT = _link_function('glGetFramebufferAttachmentParameterivEXT', None, [GLenum, GLenum, GLenum, POINTER(GLint)], requires='None') 
-glGetFramebufferParameteriv = _link_function('glGetFramebufferParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 4.3') -glGetGraphicsResetStatus = _link_function('glGetGraphicsResetStatus', GLenum, [], requires='OpenGL 4.5') -glGetInteger64i_v = _link_function('glGetInteger64i_v', None, [GLenum, GLuint, POINTER(GLint64)], requires='OpenGL 3.2') -glGetInteger64v = _link_function('glGetInteger64v', None, [GLenum, POINTER(GLint64)], requires='OpenGL 3.2') -glGetIntegeri_v = _link_function('glGetIntegeri_v', None, [GLenum, GLuint, POINTER(GLint)], requires='OpenGL 3.1') -glGetIntegerv = _link_function('glGetIntegerv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glGetInternalformati64v = _link_function('glGetInternalformati64v', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLint64)], requires='OpenGL 4.3') -glGetInternalformativ = _link_function('glGetInternalformativ', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLint)], requires='OpenGL 4.2') -glGetLightfv = _link_function('glGetLightfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glGetLightiv = _link_function('glGetLightiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glGetMapdv = _link_function('glGetMapdv', None, [GLenum, GLenum, POINTER(GLdouble)], requires='OpenGL 1.0') -glGetMapfv = _link_function('glGetMapfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glGetMapiv = _link_function('glGetMapiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glGetMaterialfv = _link_function('glGetMaterialfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glGetMaterialiv = _link_function('glGetMaterialiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glGetMultisamplefv = _link_function('glGetMultisamplefv', None, [GLenum, GLuint, POINTER(GLfloat)], requires='OpenGL 3.2') -glGetNamedBufferParameteri64v = _link_function('glGetNamedBufferParameteri64v', None, [GLuint, GLenum, POINTER(GLint64)], requires='OpenGL 4.5') -glGetNamedBufferParameteriv = _link_function('glGetNamedBufferParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetNamedBufferPointerv = _link_function('glGetNamedBufferPointerv', None, [GLuint, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetNamedBufferSubData = _link_function('glGetNamedBufferSubData', None, [GLuint, GLintptr, GLsizeiptr, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetNamedFramebufferAttachmentParameteriv = _link_function('glGetNamedFramebufferAttachmentParameteriv', None, [GLuint, GLenum, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetNamedFramebufferParameteriv = _link_function('glGetNamedFramebufferParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetNamedRenderbufferParameteriv = _link_function('glGetNamedRenderbufferParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetObjectLabel = _link_function('glGetObjectLabel', None, [GLenum, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.3') -glGetObjectPtrLabel = _link_function('glGetObjectPtrLabel', None, [POINTER(GLvoid), GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.3') -glGetPixelMapfv = _link_function('glGetPixelMapfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glGetPixelMapuiv = _link_function('glGetPixelMapuiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 1.0') -glGetPixelMapusv = _link_function('glGetPixelMapusv', None, [GLenum, POINTER(GLushort)], 
requires='OpenGL 1.0') -glGetPointerv = _link_function('glGetPointerv', None, [GLenum, POINTER(GLvoid)], requires='OpenGL 4.3') -glGetPolygonStipple = _link_function('glGetPolygonStipple', None, [POINTER(GLubyte)], requires='OpenGL 1.0') -glGetProgramBinary = _link_function('glGetProgramBinary', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLenum), POINTER(GLvoid)], requires='OpenGL 4.1') -glGetProgramInfoLog = _link_function('glGetProgramInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 2.0') -glGetProgramInterfaceiv = _link_function('glGetProgramInterfaceiv', None, [GLuint, GLenum, GLenum, POINTER(GLint)], requires='OpenGL 4.3') -glGetProgramPipelineInfoLog = _link_function('glGetProgramPipelineInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.1') -glGetProgramPipelineiv = _link_function('glGetProgramPipelineiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.1') -glGetProgramResourceIndex = _link_function('glGetProgramResourceIndex', GLuint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.3') -glGetProgramResourceLocation = _link_function('glGetProgramResourceLocation', GLint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.3') -glGetProgramResourceLocationIndex = _link_function('glGetProgramResourceLocationIndex', GLint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.3') -glGetProgramResourceName = _link_function('glGetProgramResourceName', None, [GLuint, GLenum, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.3') -glGetProgramResourceiv = _link_function('glGetProgramResourceiv', None, [GLuint, GLenum, GLuint, GLsizei, POINTER(GLenum), GLsizei, POINTER(GLsizei), POINTER(GLint)], requires='OpenGL 4.3') -glGetProgramStageiv = _link_function('glGetProgramStageiv', None, [GLuint, GLenum, GLenum, POINTER(GLint)], requires='OpenGL 4.0') -glGetProgramiv = _link_function('glGetProgramiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 2.0') -glGetQueryBufferObjecti64v = _link_function('glGetQueryBufferObjecti64v', None, [GLuint, GLuint, GLenum, GLintptr], requires='OpenGL 4.5') -glGetQueryBufferObjectiv = _link_function('glGetQueryBufferObjectiv', None, [GLuint, GLuint, GLenum, GLintptr], requires='OpenGL 4.5') -glGetQueryBufferObjectui64v = _link_function('glGetQueryBufferObjectui64v', None, [GLuint, GLuint, GLenum, GLintptr], requires='OpenGL 4.5') -glGetQueryBufferObjectuiv = _link_function('glGetQueryBufferObjectuiv', None, [GLuint, GLuint, GLenum, GLintptr], requires='OpenGL 4.5') -glGetQueryIndexediv = _link_function('glGetQueryIndexediv', None, [GLenum, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.0') -glGetQueryObjecti64v = _link_function('glGetQueryObjecti64v', None, [GLuint, GLenum, POINTER(GLint64)], requires='OpenGL 3.3') -glGetQueryObjectiv = _link_function('glGetQueryObjectiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 1.5') -glGetQueryObjectui64v = _link_function('glGetQueryObjectui64v', None, [GLuint, GLenum, POINTER(GLuint64)], requires='OpenGL 3.3') -glGetQueryObjectuiv = _link_function('glGetQueryObjectuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 1.5') -glGetQueryiv = _link_function('glGetQueryiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.5') -glGetRenderbufferParameteriv = _link_function('glGetRenderbufferParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 3.0') -glGetRenderbufferParameterivEXT = _link_function('glGetRenderbufferParameterivEXT', None, 
[GLenum, GLenum, POINTER(GLint)], requires='None') -glGetSamplerParameterIiv = _link_function('glGetSamplerParameterIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.3') -glGetSamplerParameterIuiv = _link_function('glGetSamplerParameterIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glGetSamplerParameterfv = _link_function('glGetSamplerParameterfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 3.3') -glGetSamplerParameteriv = _link_function('glGetSamplerParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.3') -glGetShaderInfoLog = _link_function('glGetShaderInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 2.0') -glGetShaderPrecisionFormat = _link_function('glGetShaderPrecisionFormat', None, [GLenum, GLenum, POINTER(GLint), POINTER(GLint)], requires='OpenGL 4.1') -glGetShaderSource = _link_function('glGetShaderSource', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 2.0') -glGetShaderiv = _link_function('glGetShaderiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 2.0') -glGetString = _link_function('glGetString', POINTER(GLubyte), [GLenum], requires='OpenGL 1.0') -glGetStringi = _link_function('glGetStringi', POINTER(GLubyte), [GLenum, GLuint], requires='OpenGL 3.0') -glGetSubroutineIndex = _link_function('glGetSubroutineIndex', GLuint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.0') -glGetSubroutineUniformLocation = _link_function('glGetSubroutineUniformLocation', GLint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.0') -glGetSynciv = _link_function('glGetSynciv', None, [GLsync, GLenum, GLsizei, POINTER(GLsizei), POINTER(GLint)], requires='OpenGL 3.2') -glGetTexEnvfv = _link_function('glGetTexEnvfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glGetTexEnviv = _link_function('glGetTexEnviv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glGetTexGendv = _link_function('glGetTexGendv', None, [GLenum, GLenum, POINTER(GLdouble)], requires='OpenGL 1.0') -glGetTexGenfv = _link_function('glGetTexGenfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glGetTexGeniv = _link_function('glGetTexGeniv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glGetTexImage = _link_function('glGetTexImage', None, [GLenum, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0') -glGetTexLevelParameterfv = _link_function('glGetTexLevelParameterfv', None, [GLenum, GLint, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glGetTexLevelParameteriv = _link_function('glGetTexLevelParameteriv', None, [GLenum, GLint, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glGetTexParameterIiv = _link_function('glGetTexParameterIiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 3.0') -glGetTexParameterIuiv = _link_function('glGetTexParameterIuiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.0') -glGetTexParameterfv = _link_function('glGetTexParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glGetTexParameteriv = _link_function('glGetTexParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glGetTextureImage = _link_function('glGetTextureImage', None, [GLuint, GLint, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetTextureLevelParameterfv = _link_function('glGetTextureLevelParameterfv', None, [GLuint, GLint, GLenum, POINTER(GLfloat)], requires='OpenGL 4.5') 
-glGetTextureLevelParameteriv = _link_function('glGetTextureLevelParameteriv', None, [GLuint, GLint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetTextureParameterIiv = _link_function('glGetTextureParameterIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetTextureParameterIuiv = _link_function('glGetTextureParameterIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 4.5') -glGetTextureParameterfv = _link_function('glGetTextureParameterfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 4.5') -glGetTextureParameteriv = _link_function('glGetTextureParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetTextureSubImage = _link_function('glGetTextureSubImage', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetTransformFeedbackVarying = _link_function('glGetTransformFeedbackVarying', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLsizei), POINTER(GLenum), POINTER(GLchar)], requires='OpenGL 3.0') -glGetTransformFeedbacki64_v = _link_function('glGetTransformFeedbacki64_v', None, [GLuint, GLenum, GLuint, POINTER(GLint64)], requires='OpenGL 4.5') -glGetTransformFeedbacki_v = _link_function('glGetTransformFeedbacki_v', None, [GLuint, GLenum, GLuint, POINTER(GLint)], requires='OpenGL 4.5') -glGetTransformFeedbackiv = _link_function('glGetTransformFeedbackiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetUniformBlockIndex = _link_function('glGetUniformBlockIndex', GLuint, [GLuint, POINTER(GLchar)], requires='OpenGL 3.1') -glGetUniformIndices = _link_function('glGetUniformIndices', None, [GLuint, GLsizei, POINTER(POINTER(GLchar)), POINTER(GLuint)], requires='OpenGL 3.1') -glGetUniformLocation = _link_function('glGetUniformLocation', GLint, [GLuint, POINTER(GLchar)], requires='OpenGL 2.0') -glGetUniformSubroutineuiv = _link_function('glGetUniformSubroutineuiv', None, [GLenum, GLint, POINTER(GLuint)], requires='OpenGL 4.0') -glGetUniformdv = _link_function('glGetUniformdv', None, [GLuint, GLint, POINTER(GLdouble)], requires='OpenGL 4.0') -glGetUniformfv = _link_function('glGetUniformfv', None, [GLuint, GLint, POINTER(GLfloat)], requires='OpenGL 2.0') -glGetUniformiv = _link_function('glGetUniformiv', None, [GLuint, GLint, POINTER(GLint)], requires='OpenGL 2.0') -glGetUniformuiv = _link_function('glGetUniformuiv', None, [GLuint, GLint, POINTER(GLuint)], requires='OpenGL 3.0') -glGetVertexArrayIndexed64iv = _link_function('glGetVertexArrayIndexed64iv', None, [GLuint, GLuint, GLenum, POINTER(GLint64)], requires='OpenGL 4.5') -glGetVertexArrayIndexediv = _link_function('glGetVertexArrayIndexediv', None, [GLuint, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetVertexArrayiv = _link_function('glGetVertexArrayiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glGetVertexAttribIiv = _link_function('glGetVertexAttribIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.0') -glGetVertexAttribIuiv = _link_function('glGetVertexAttribIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 3.0') -glGetVertexAttribLdv = _link_function('glGetVertexAttribLdv', None, [GLuint, GLenum, POINTER(GLdouble)], requires='OpenGL 4.1') -glGetVertexAttribPointerv = _link_function('glGetVertexAttribPointerv', None, [GLuint, GLenum, POINTER(GLvoid)], requires='OpenGL 2.0') -glGetVertexAttribdv = _link_function('glGetVertexAttribdv', None, [GLuint, GLenum, POINTER(GLdouble)], 
requires='OpenGL 2.0') -glGetVertexAttribfv = _link_function('glGetVertexAttribfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 2.0') -glGetVertexAttribiv = _link_function('glGetVertexAttribiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 2.0') -glGetnColorTable = _link_function('glGetnColorTable', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetnCompressedTexImage = _link_function('glGetnCompressedTexImage', None, [GLenum, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetnConvolutionFilter = _link_function('glGetnConvolutionFilter', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetnHistogram = _link_function('glGetnHistogram', None, [GLenum, GLboolean, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetnMapdv = _link_function('glGetnMapdv', None, [GLenum, GLenum, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.5') -glGetnMapfv = _link_function('glGetnMapfv', None, [GLenum, GLenum, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.5') -glGetnMapiv = _link_function('glGetnMapiv', None, [GLenum, GLenum, GLsizei, POINTER(GLint)], requires='OpenGL 4.5') -glGetnMinmax = _link_function('glGetnMinmax', None, [GLenum, GLboolean, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetnPixelMapfv = _link_function('glGetnPixelMapfv', None, [GLenum, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.5') -glGetnPixelMapuiv = _link_function('glGetnPixelMapuiv', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glGetnPixelMapusv = _link_function('glGetnPixelMapusv', None, [GLenum, GLsizei, POINTER(GLushort)], requires='OpenGL 4.5') -glGetnPolygonStipple = _link_function('glGetnPolygonStipple', None, [GLsizei, POINTER(GLubyte)], requires='OpenGL 4.5') -glGetnSeparableFilter = _link_function('glGetnSeparableFilter', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLvoid), GLsizei, POINTER(GLvoid), POINTER(GLvoid)], requires='OpenGL 4.5') -glGetnTexImage = _link_function('glGetnTexImage', None, [GLenum, GLint, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glGetnUniformdv = _link_function('glGetnUniformdv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.5') -glGetnUniformfv = _link_function('glGetnUniformfv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.5') -glGetnUniformiv = _link_function('glGetnUniformiv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.5') -glGetnUniformuiv = _link_function('glGetnUniformuiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.5') -glHint = _link_function('glHint', None, [GLenum, GLenum], requires='OpenGL 1.0') -glIndexMask = _link_function('glIndexMask', None, [GLuint], requires='OpenGL 1.0') -glIndexPointer = _link_function('glIndexPointer', None, [GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1') -glIndexd = _link_function('glIndexd', None, [GLdouble], requires='OpenGL 1.0') -glIndexdv = _link_function('glIndexdv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glIndexf = _link_function('glIndexf', None, [GLfloat], requires='OpenGL 1.0') -glIndexfv = _link_function('glIndexfv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glIndexi = _link_function('glIndexi', None, [GLint], requires='OpenGL 1.0') -glIndexiv = _link_function('glIndexiv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glIndexs = _link_function('glIndexs', None, [GLshort], requires='OpenGL 1.0') -glIndexsv = 
_link_function('glIndexsv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glIndexub = _link_function('glIndexub', None, [GLubyte], requires='OpenGL 1.1') -glIndexubv = _link_function('glIndexubv', None, [POINTER(GLubyte)], requires='OpenGL 1.1') -glInitNames = _link_function('glInitNames', None, [], requires='OpenGL 1.0') -glInterleavedArrays = _link_function('glInterleavedArrays', None, [GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1') -glInvalidateBufferData = _link_function('glInvalidateBufferData', None, [GLuint], requires='OpenGL 4.3') -glInvalidateBufferSubData = _link_function('glInvalidateBufferSubData', None, [GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.3') -glInvalidateFramebuffer = _link_function('glInvalidateFramebuffer', None, [GLenum, GLsizei, POINTER(GLenum)], requires='OpenGL 4.3') -glInvalidateNamedFramebufferData = _link_function('glInvalidateNamedFramebufferData', None, [GLuint, GLsizei, POINTER(GLenum)], requires='OpenGL 4.5') -glInvalidateNamedFramebufferSubData = _link_function('glInvalidateNamedFramebufferSubData', None, [GLuint, GLsizei, POINTER(GLenum), GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.5') -glInvalidateSubFramebuffer = _link_function('glInvalidateSubFramebuffer', None, [GLenum, GLsizei, POINTER(GLenum), GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.3') -glInvalidateTexImage = _link_function('glInvalidateTexImage', None, [GLuint, GLint], requires='OpenGL 4.3') -glInvalidateTexSubImage = _link_function('glInvalidateTexSubImage', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei], requires='OpenGL 4.3') -glIsBuffer = _link_function('glIsBuffer', GLboolean, [GLuint], requires='OpenGL 1.5') -glIsEnabled = _link_function('glIsEnabled', GLboolean, [GLenum], requires='OpenGL 1.0') -glIsEnabledi = _link_function('glIsEnabledi', GLboolean, [GLenum, GLuint], requires='OpenGL 3.0') -glIsFramebuffer = _link_function('glIsFramebuffer', GLboolean, [GLuint], requires='OpenGL 3.0') -glIsFramebufferEXT = _link_function('glIsFramebufferEXT', GLboolean, [GLuint], requires='None') -glIsList = _link_function('glIsList', GLboolean, [GLuint], requires='OpenGL 1.0') -glIsProgram = _link_function('glIsProgram', GLboolean, [GLuint], requires='OpenGL 2.0') -glIsProgramPipeline = _link_function('glIsProgramPipeline', GLboolean, [GLuint], requires='OpenGL 4.1') -glIsQuery = _link_function('glIsQuery', GLboolean, [GLuint], requires='OpenGL 1.5') -glIsRenderbuffer = _link_function('glIsRenderbuffer', GLboolean, [GLuint], requires='OpenGL 3.0') -glIsRenderbufferEXT = _link_function('glIsRenderbufferEXT', GLboolean, [GLuint], requires='None') -glIsSampler = _link_function('glIsSampler', GLboolean, [GLuint], requires='OpenGL 3.3') -glIsShader = _link_function('glIsShader', GLboolean, [GLuint], requires='OpenGL 2.0') -glIsSync = _link_function('glIsSync', GLboolean, [GLsync], requires='OpenGL 3.2') -glIsTexture = _link_function('glIsTexture', GLboolean, [GLuint], requires='OpenGL 1.1') -glIsTransformFeedback = _link_function('glIsTransformFeedback', GLboolean, [GLuint], requires='OpenGL 4.0') -glIsVertexArray = _link_function('glIsVertexArray', GLboolean, [GLuint], requires='OpenGL 3.0') -glLightModelf = _link_function('glLightModelf', None, [GLenum, GLfloat], requires='OpenGL 1.0') -glLightModelfv = _link_function('glLightModelfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glLightModeli = _link_function('glLightModeli', None, [GLenum, GLint], requires='OpenGL 1.0') -glLightModeliv = _link_function('glLightModeliv', None, 
[GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glLightf = _link_function('glLightf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0') -glLightfv = _link_function('glLightfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glLighti = _link_function('glLighti', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0') -glLightiv = _link_function('glLightiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glLineStipple = _link_function('glLineStipple', None, [GLint, GLushort], requires='OpenGL 1.0') -glLineWidth = _link_function('glLineWidth', None, [GLfloat], requires='OpenGL 1.0') -glLinkProgram = _link_function('glLinkProgram', None, [GLuint], requires='OpenGL 2.0') -glListBase = _link_function('glListBase', None, [GLuint], requires='OpenGL 1.0') -glLoadIdentity = _link_function('glLoadIdentity', None, [], requires='OpenGL 1.0') -glLoadMatrixd = _link_function('glLoadMatrixd', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glLoadMatrixf = _link_function('glLoadMatrixf', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glLoadName = _link_function('glLoadName', None, [GLuint], requires='OpenGL 1.0') -glLoadTransposeMatrixd = _link_function('glLoadTransposeMatrixd', None, [POINTER(GLdouble)], requires='OpenGL 1.3') -glLoadTransposeMatrixf = _link_function('glLoadTransposeMatrixf', None, [POINTER(GLfloat)], requires='OpenGL 1.3') -glLogicOp = _link_function('glLogicOp', None, [GLenum], requires='OpenGL 1.0') -glMap1d = _link_function('glMap1d', None, [GLenum, GLdouble, GLdouble, GLint, GLint, POINTER(GLdouble)], requires='OpenGL 1.0') -glMap1f = _link_function('glMap1f', None, [GLenum, GLfloat, GLfloat, GLint, GLint, POINTER(GLfloat)], requires='OpenGL 1.0') -glMap2d = _link_function('glMap2d', None, [GLenum, GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble, GLint, GLint, POINTER(GLdouble)], requires='OpenGL 1.0') -glMap2f = _link_function('glMap2f', None, [GLenum, GLfloat, GLfloat, GLint, GLint, GLfloat, GLfloat, GLint, GLint, POINTER(GLfloat)], requires='OpenGL 1.0') -glMapBuffer = _link_function('glMapBuffer', POINTER(None), [GLenum, GLenum], requires='OpenGL 1.5') -glMapBufferRange = _link_function('glMapBufferRange', POINTER(None), [GLenum, GLintptr, GLsizeiptr, GLbitfield], requires='OpenGL 3.0') -glMapGrid1d = _link_function('glMapGrid1d', None, [GLint, GLdouble, GLdouble], requires='OpenGL 1.0') -glMapGrid1f = _link_function('glMapGrid1f', None, [GLint, GLfloat, GLfloat], requires='OpenGL 1.0') -glMapGrid2d = _link_function('glMapGrid2d', None, [GLint, GLdouble, GLdouble, GLint, GLdouble, GLdouble], requires='OpenGL 1.0') -glMapGrid2f = _link_function('glMapGrid2f', None, [GLint, GLfloat, GLfloat, GLint, GLfloat, GLfloat], requires='OpenGL 1.0') -glMapNamedBuffer = _link_function('glMapNamedBuffer', POINTER(None), [GLuint, GLenum], requires='OpenGL 4.5') -glMapNamedBufferRange = _link_function('glMapNamedBufferRange', POINTER(None), [GLuint, GLintptr, GLsizeiptr, GLbitfield], requires='OpenGL 4.5') -glMaterialf = _link_function('glMaterialf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0') -glMaterialfv = _link_function('glMaterialfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glMateriali = _link_function('glMateriali', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0') -glMaterialiv = _link_function('glMaterialiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glMatrixMode = _link_function('glMatrixMode', None, [GLenum], requires='OpenGL 1.0') -glMemoryBarrier = 
_link_function('glMemoryBarrier', None, [GLbitfield], requires='OpenGL 4.2') -glMemoryBarrierByRegion = _link_function('glMemoryBarrierByRegion', None, [GLbitfield], requires='OpenGL 4.5') -glMinSampleShading = _link_function('glMinSampleShading', None, [GLfloat], requires='OpenGL 4.0') -glMultMatrixd = _link_function('glMultMatrixd', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glMultMatrixf = _link_function('glMultMatrixf', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glMultTransposeMatrixd = _link_function('glMultTransposeMatrixd', None, [POINTER(GLdouble)], requires='OpenGL 1.3') -glMultTransposeMatrixf = _link_function('glMultTransposeMatrixf', None, [POINTER(GLfloat)], requires='OpenGL 1.3') -glMultiDrawArrays = _link_function('glMultiDrawArrays', None, [GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei], requires='OpenGL 1.4') -glMultiDrawArraysIndirect = _link_function('glMultiDrawArraysIndirect', None, [GLenum, POINTER(GLvoid), GLsizei, GLsizei], requires='OpenGL 4.3') -glMultiDrawArraysIndirectCount = _link_function('glMultiDrawArraysIndirectCount', None, [GLenum, POINTER(GLvoid), GLintptr, GLsizei, GLsizei], requires='OpenGL 4.6') -glMultiDrawElements = _link_function('glMultiDrawElements', None, [GLenum, POINTER(GLsizei), GLenum, POINTER(GLvoid), GLsizei], requires='OpenGL 1.4') -glMultiDrawElementsBaseVertex = _link_function('glMultiDrawElementsBaseVertex', None, [GLenum, POINTER(GLsizei), GLenum, POINTER(GLvoid), GLsizei, POINTER(GLint)], requires='OpenGL 3.2') -glMultiDrawElementsIndirect = _link_function('glMultiDrawElementsIndirect', None, [GLenum, GLenum, POINTER(GLvoid), GLsizei, GLsizei], requires='OpenGL 4.3') -glMultiDrawElementsIndirectCount = _link_function('glMultiDrawElementsIndirectCount', None, [GLenum, GLenum, POINTER(GLvoid), GLintptr, GLsizei, GLsizei], requires='OpenGL 4.6') -glMultiTexCoord1d = _link_function('glMultiTexCoord1d', None, [GLenum, GLdouble], requires='OpenGL 1.3') -glMultiTexCoord1dv = _link_function('glMultiTexCoord1dv', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.3') -glMultiTexCoord1f = _link_function('glMultiTexCoord1f', None, [GLenum, GLfloat], requires='OpenGL 1.3') -glMultiTexCoord1fv = _link_function('glMultiTexCoord1fv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.3') -glMultiTexCoord1i = _link_function('glMultiTexCoord1i', None, [GLenum, GLint], requires='OpenGL 1.3') -glMultiTexCoord1iv = _link_function('glMultiTexCoord1iv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.3') -glMultiTexCoord1s = _link_function('glMultiTexCoord1s', None, [GLenum, GLshort], requires='OpenGL 1.3') -glMultiTexCoord1sv = _link_function('glMultiTexCoord1sv', None, [GLenum, POINTER(GLshort)], requires='OpenGL 1.3') -glMultiTexCoord2d = _link_function('glMultiTexCoord2d', None, [GLenum, GLdouble, GLdouble], requires='OpenGL 1.3') -glMultiTexCoord2dv = _link_function('glMultiTexCoord2dv', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.3') -glMultiTexCoord2f = _link_function('glMultiTexCoord2f', None, [GLenum, GLfloat, GLfloat], requires='OpenGL 1.3') -glMultiTexCoord2fv = _link_function('glMultiTexCoord2fv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.3') -glMultiTexCoord2i = _link_function('glMultiTexCoord2i', None, [GLenum, GLint, GLint], requires='OpenGL 1.3') -glMultiTexCoord2iv = _link_function('glMultiTexCoord2iv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.3') -glMultiTexCoord2s = _link_function('glMultiTexCoord2s', None, [GLenum, GLshort, GLshort], requires='OpenGL 1.3') -glMultiTexCoord2sv = 
_link_function('glMultiTexCoord2sv', None, [GLenum, POINTER(GLshort)], requires='OpenGL 1.3') -glMultiTexCoord3d = _link_function('glMultiTexCoord3d', None, [GLenum, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.3') -glMultiTexCoord3dv = _link_function('glMultiTexCoord3dv', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.3') -glMultiTexCoord3f = _link_function('glMultiTexCoord3f', None, [GLenum, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.3') -glMultiTexCoord3fv = _link_function('glMultiTexCoord3fv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.3') -glMultiTexCoord3i = _link_function('glMultiTexCoord3i', None, [GLenum, GLint, GLint, GLint], requires='OpenGL 1.3') -glMultiTexCoord3iv = _link_function('glMultiTexCoord3iv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.3') -glMultiTexCoord3s = _link_function('glMultiTexCoord3s', None, [GLenum, GLshort, GLshort, GLshort], requires='OpenGL 1.3') -glMultiTexCoord3sv = _link_function('glMultiTexCoord3sv', None, [GLenum, POINTER(GLshort)], requires='OpenGL 1.3') -glMultiTexCoord4d = _link_function('glMultiTexCoord4d', None, [GLenum, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.3') -glMultiTexCoord4dv = _link_function('glMultiTexCoord4dv', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.3') -glMultiTexCoord4f = _link_function('glMultiTexCoord4f', None, [GLenum, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.3') -glMultiTexCoord4fv = _link_function('glMultiTexCoord4fv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.3') -glMultiTexCoord4i = _link_function('glMultiTexCoord4i', None, [GLenum, GLint, GLint, GLint, GLint], requires='OpenGL 1.3') -glMultiTexCoord4iv = _link_function('glMultiTexCoord4iv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.3') -glMultiTexCoord4s = _link_function('glMultiTexCoord4s', None, [GLenum, GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.3') -glMultiTexCoord4sv = _link_function('glMultiTexCoord4sv', None, [GLenum, POINTER(GLshort)], requires='OpenGL 1.3') -glMultiTexCoordP1ui = _link_function('glMultiTexCoordP1ui', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.3') -glMultiTexCoordP1uiv = _link_function('glMultiTexCoordP1uiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glMultiTexCoordP2ui = _link_function('glMultiTexCoordP2ui', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.3') -glMultiTexCoordP2uiv = _link_function('glMultiTexCoordP2uiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glMultiTexCoordP3ui = _link_function('glMultiTexCoordP3ui', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.3') -glMultiTexCoordP3uiv = _link_function('glMultiTexCoordP3uiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glMultiTexCoordP4ui = _link_function('glMultiTexCoordP4ui', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.3') -glMultiTexCoordP4uiv = _link_function('glMultiTexCoordP4uiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glNamedBufferData = _link_function('glNamedBufferData', None, [GLuint, GLsizeiptr, POINTER(GLvoid), GLenum], requires='OpenGL 4.5') -glNamedBufferStorage = _link_function('glNamedBufferStorage', None, [GLuint, GLsizeiptr, POINTER(GLvoid), GLbitfield], requires='OpenGL 4.5') -glNamedBufferSubData = _link_function('glNamedBufferSubData', None, [GLuint, GLintptr, GLsizeiptr, POINTER(GLvoid)], requires='OpenGL 4.5') -glNamedFramebufferDrawBuffer = _link_function('glNamedFramebufferDrawBuffer', None, [GLuint, GLenum], requires='OpenGL 4.5') 
-glNamedFramebufferDrawBuffers = _link_function('glNamedFramebufferDrawBuffers', None, [GLuint, GLsizei, POINTER(GLenum)], requires='OpenGL 4.5') -glNamedFramebufferParameteri = _link_function('glNamedFramebufferParameteri', None, [GLuint, GLenum, GLint], requires='OpenGL 4.5') -glNamedFramebufferReadBuffer = _link_function('glNamedFramebufferReadBuffer', None, [GLuint, GLenum], requires='OpenGL 4.5') -glNamedFramebufferRenderbuffer = _link_function('glNamedFramebufferRenderbuffer', None, [GLuint, GLenum, GLenum, GLuint], requires='OpenGL 4.5') -glNamedFramebufferTexture = _link_function('glNamedFramebufferTexture', None, [GLuint, GLenum, GLuint, GLint], requires='OpenGL 4.5') -glNamedFramebufferTextureLayer = _link_function('glNamedFramebufferTextureLayer', None, [GLuint, GLenum, GLuint, GLint, GLint], requires='OpenGL 4.5') -glNamedRenderbufferStorage = _link_function('glNamedRenderbufferStorage', None, [GLuint, GLenum, GLsizei, GLsizei], requires='OpenGL 4.5') -glNamedRenderbufferStorageMultisample = _link_function('glNamedRenderbufferStorageMultisample', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei], requires='OpenGL 4.5') -glNewList = _link_function('glNewList', None, [GLuint, GLenum], requires='OpenGL 1.0') -glNormal3b = _link_function('glNormal3b', None, [GLbyte, GLbyte, GLbyte], requires='OpenGL 1.0') -glNormal3bv = _link_function('glNormal3bv', None, [POINTER(GLbyte)], requires='OpenGL 1.0') -glNormal3d = _link_function('glNormal3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glNormal3dv = _link_function('glNormal3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glNormal3f = _link_function('glNormal3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glNormal3fv = _link_function('glNormal3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glNormal3i = _link_function('glNormal3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0') -glNormal3iv = _link_function('glNormal3iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glNormal3s = _link_function('glNormal3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glNormal3sv = _link_function('glNormal3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glNormalP3ui = _link_function('glNormalP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glNormalP3uiv = _link_function('glNormalP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glNormalPointer = _link_function('glNormalPointer', None, [GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1') -glObjectLabel = _link_function('glObjectLabel', None, [GLenum, GLuint, GLsizei, POINTER(GLchar)], requires='OpenGL 4.3') -glObjectPtrLabel = _link_function('glObjectPtrLabel', None, [POINTER(GLvoid), GLsizei, POINTER(GLchar)], requires='OpenGL 4.3') -glOrtho = _link_function('glOrtho', None, [GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glPassThrough = _link_function('glPassThrough', None, [GLfloat], requires='OpenGL 1.0') -glPatchParameterfv = _link_function('glPatchParameterfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 4.0') -glPatchParameteri = _link_function('glPatchParameteri', None, [GLenum, GLint], requires='OpenGL 4.0') -glPauseTransformFeedback = _link_function('glPauseTransformFeedback', None, [], requires='OpenGL 4.0') -glPixelMapfv = _link_function('glPixelMapfv', None, [GLenum, GLsizei, POINTER(GLfloat)], requires='OpenGL 1.0') -glPixelMapuiv = _link_function('glPixelMapuiv', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 1.0') -glPixelMapusv 
= _link_function('glPixelMapusv', None, [GLenum, GLsizei, POINTER(GLushort)], requires='OpenGL 1.0') -glPixelStoref = _link_function('glPixelStoref', None, [GLenum, GLfloat], requires='OpenGL 1.0') -glPixelStorei = _link_function('glPixelStorei', None, [GLenum, GLint], requires='OpenGL 1.0') -glPixelTransferf = _link_function('glPixelTransferf', None, [GLenum, GLfloat], requires='OpenGL 1.0') -glPixelTransferi = _link_function('glPixelTransferi', None, [GLenum, GLint], requires='OpenGL 1.0') -glPixelZoom = _link_function('glPixelZoom', None, [GLfloat, GLfloat], requires='OpenGL 1.0') -glPointParameterf = _link_function('glPointParameterf', None, [GLenum, GLfloat], requires='OpenGL 1.4') -glPointParameterfv = _link_function('glPointParameterfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.4') -glPointParameteri = _link_function('glPointParameteri', None, [GLenum, GLint], requires='OpenGL 1.4') -glPointParameteriv = _link_function('glPointParameteriv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.4') -glPointSize = _link_function('glPointSize', None, [GLfloat], requires='OpenGL 1.0') -glPolygonMode = _link_function('glPolygonMode', None, [GLenum, GLenum], requires='OpenGL 1.0') -glPolygonOffset = _link_function('glPolygonOffset', None, [GLfloat, GLfloat], requires='OpenGL 1.1') -glPolygonOffsetClamp = _link_function('glPolygonOffsetClamp', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 4.6') -glPolygonStipple = _link_function('glPolygonStipple', None, [POINTER(GLubyte)], requires='OpenGL 1.0') -glPopAttrib = _link_function('glPopAttrib', None, [], requires='OpenGL 1.0') -glPopClientAttrib = _link_function('glPopClientAttrib', None, [], requires='OpenGL 1.1') -glPopDebugGroup = _link_function('glPopDebugGroup', None, [], requires='OpenGL 4.3') -glPopMatrix = _link_function('glPopMatrix', None, [], requires='OpenGL 1.0') -glPopName = _link_function('glPopName', None, [], requires='OpenGL 1.0') -glPrimitiveRestartIndex = _link_function('glPrimitiveRestartIndex', None, [GLuint], requires='OpenGL 3.1') -glPrioritizeTextures = _link_function('glPrioritizeTextures', None, [GLsizei, POINTER(GLuint), POINTER(GLfloat)], requires='OpenGL 1.1') -glProgramBinary = _link_function('glProgramBinary', None, [GLuint, GLenum, POINTER(GLvoid), GLsizei], requires='OpenGL 4.1') -glProgramParameteri = _link_function('glProgramParameteri', None, [GLuint, GLenum, GLint], requires='OpenGL 4.1') -glProgramUniform1d = _link_function('glProgramUniform1d', None, [GLuint, GLint, GLdouble], requires='OpenGL 4.1') -glProgramUniform1dv = _link_function('glProgramUniform1dv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniform1f = _link_function('glProgramUniform1f', None, [GLuint, GLint, GLfloat], requires='OpenGL 4.1') -glProgramUniform1fv = _link_function('glProgramUniform1fv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniform1i = _link_function('glProgramUniform1i', None, [GLuint, GLint, GLint], requires='OpenGL 4.1') -glProgramUniform1iv = _link_function('glProgramUniform1iv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1') -glProgramUniform1ui = _link_function('glProgramUniform1ui', None, [GLuint, GLint, GLuint], requires='OpenGL 4.1') -glProgramUniform1uiv = _link_function('glProgramUniform1uiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.1') -glProgramUniform2d = _link_function('glProgramUniform2d', None, [GLuint, GLint, GLdouble, GLdouble], requires='OpenGL 4.1') 
-glProgramUniform2dv = _link_function('glProgramUniform2dv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniform2f = _link_function('glProgramUniform2f', None, [GLuint, GLint, GLfloat, GLfloat], requires='OpenGL 4.1') -glProgramUniform2fv = _link_function('glProgramUniform2fv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniform2i = _link_function('glProgramUniform2i', None, [GLuint, GLint, GLint, GLint], requires='OpenGL 4.1') -glProgramUniform2iv = _link_function('glProgramUniform2iv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1') -glProgramUniform2ui = _link_function('glProgramUniform2ui', None, [GLuint, GLint, GLuint, GLuint], requires='OpenGL 4.1') -glProgramUniform2uiv = _link_function('glProgramUniform2uiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.1') -glProgramUniform3d = _link_function('glProgramUniform3d', None, [GLuint, GLint, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.1') -glProgramUniform3dv = _link_function('glProgramUniform3dv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniform3f = _link_function('glProgramUniform3f', None, [GLuint, GLint, GLfloat, GLfloat, GLfloat], requires='OpenGL 4.1') -glProgramUniform3fv = _link_function('glProgramUniform3fv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniform3i = _link_function('glProgramUniform3i', None, [GLuint, GLint, GLint, GLint, GLint], requires='OpenGL 4.1') -glProgramUniform3iv = _link_function('glProgramUniform3iv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1') -glProgramUniform3ui = _link_function('glProgramUniform3ui', None, [GLuint, GLint, GLuint, GLuint, GLuint], requires='OpenGL 4.1') -glProgramUniform3uiv = _link_function('glProgramUniform3uiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.1') -glProgramUniform4d = _link_function('glProgramUniform4d', None, [GLuint, GLint, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.1') -glProgramUniform4dv = _link_function('glProgramUniform4dv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniform4f = _link_function('glProgramUniform4f', None, [GLuint, GLint, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 4.1') -glProgramUniform4fv = _link_function('glProgramUniform4fv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniform4i = _link_function('glProgramUniform4i', None, [GLuint, GLint, GLint, GLint, GLint, GLint], requires='OpenGL 4.1') -glProgramUniform4iv = _link_function('glProgramUniform4iv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1') -glProgramUniform4ui = _link_function('glProgramUniform4ui', None, [GLuint, GLint, GLuint, GLuint, GLuint, GLuint], requires='OpenGL 4.1') -glProgramUniform4uiv = _link_function('glProgramUniform4uiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.1') -glProgramUniformMatrix2dv = _link_function('glProgramUniformMatrix2dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniformMatrix2fv = _link_function('glProgramUniformMatrix2fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniformMatrix2x3dv = _link_function('glProgramUniformMatrix2x3dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1') 
-glProgramUniformMatrix2x3fv = _link_function('glProgramUniformMatrix2x3fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniformMatrix2x4dv = _link_function('glProgramUniformMatrix2x4dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniformMatrix2x4fv = _link_function('glProgramUniformMatrix2x4fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniformMatrix3dv = _link_function('glProgramUniformMatrix3dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniformMatrix3fv = _link_function('glProgramUniformMatrix3fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniformMatrix3x2dv = _link_function('glProgramUniformMatrix3x2dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniformMatrix3x2fv = _link_function('glProgramUniformMatrix3x2fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniformMatrix3x4dv = _link_function('glProgramUniformMatrix3x4dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniformMatrix3x4fv = _link_function('glProgramUniformMatrix3x4fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniformMatrix4dv = _link_function('glProgramUniformMatrix4dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniformMatrix4fv = _link_function('glProgramUniformMatrix4fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniformMatrix4x2dv = _link_function('glProgramUniformMatrix4x2dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniformMatrix4x2fv = _link_function('glProgramUniformMatrix4x2fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1') -glProgramUniformMatrix4x3dv = _link_function('glProgramUniformMatrix4x3dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1') -glProgramUniformMatrix4x3fv = _link_function('glProgramUniformMatrix4x3fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1') -glProvokingVertex = _link_function('glProvokingVertex', None, [GLenum], requires='OpenGL 3.2') -glPushAttrib = _link_function('glPushAttrib', None, [GLbitfield], requires='OpenGL 1.0') -glPushClientAttrib = _link_function('glPushClientAttrib', None, [GLbitfield], requires='OpenGL 1.1') -glPushDebugGroup = _link_function('glPushDebugGroup', None, [GLenum, GLuint, GLsizei, POINTER(GLchar)], requires='OpenGL 4.3') -glPushMatrix = _link_function('glPushMatrix', None, [], requires='OpenGL 1.0') -glPushName = _link_function('glPushName', None, [GLuint], requires='OpenGL 1.0') -glQueryCounter = _link_function('glQueryCounter', None, [GLuint, GLenum], requires='OpenGL 3.3') -glRasterPos2d = _link_function('glRasterPos2d', None, [GLdouble, GLdouble], requires='OpenGL 1.0') -glRasterPos2dv = _link_function('glRasterPos2dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glRasterPos2f = _link_function('glRasterPos2f', None, [GLfloat, GLfloat], requires='OpenGL 1.0') -glRasterPos2fv = _link_function('glRasterPos2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glRasterPos2i = _link_function('glRasterPos2i', None, [GLint, GLint], 
requires='OpenGL 1.0') -glRasterPos2iv = _link_function('glRasterPos2iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glRasterPos2s = _link_function('glRasterPos2s', None, [GLshort, GLshort], requires='OpenGL 1.0') -glRasterPos2sv = _link_function('glRasterPos2sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glRasterPos3d = _link_function('glRasterPos3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glRasterPos3dv = _link_function('glRasterPos3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glRasterPos3f = _link_function('glRasterPos3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glRasterPos3fv = _link_function('glRasterPos3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glRasterPos3i = _link_function('glRasterPos3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0') -glRasterPos3iv = _link_function('glRasterPos3iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glRasterPos3s = _link_function('glRasterPos3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glRasterPos3sv = _link_function('glRasterPos3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glRasterPos4d = _link_function('glRasterPos4d', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glRasterPos4dv = _link_function('glRasterPos4dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glRasterPos4f = _link_function('glRasterPos4f', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glRasterPos4fv = _link_function('glRasterPos4fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glRasterPos4i = _link_function('glRasterPos4i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0') -glRasterPos4iv = _link_function('glRasterPos4iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glRasterPos4s = _link_function('glRasterPos4s', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glRasterPos4sv = _link_function('glRasterPos4sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glReadBuffer = _link_function('glReadBuffer', None, [GLenum], requires='OpenGL 1.0') -glReadPixels = _link_function('glReadPixels', None, [GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0') -glReadnPixels = _link_function('glReadnPixels', None, [GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5') -glRectd = _link_function('glRectd', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glRectdv = _link_function('glRectdv', None, [POINTER(GLdouble), POINTER(GLdouble)], requires='OpenGL 1.0') -glRectf = _link_function('glRectf', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glRectfv = _link_function('glRectfv', None, [POINTER(GLfloat), POINTER(GLfloat)], requires='OpenGL 1.0') -glRecti = _link_function('glRecti', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0') -glRectiv = _link_function('glRectiv', None, [POINTER(GLint), POINTER(GLint)], requires='OpenGL 1.0') -glRects = _link_function('glRects', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glRectsv = _link_function('glRectsv', None, [POINTER(GLshort), POINTER(GLshort)], requires='OpenGL 1.0') -glReleaseShaderCompiler = _link_function('glReleaseShaderCompiler', None, [], requires='OpenGL 4.1') -glRenderMode = _link_function('glRenderMode', GLint, [GLenum], requires='OpenGL 1.0') -glRenderbufferStorage = _link_function('glRenderbufferStorage', None, [GLenum, GLenum, GLsizei, GLsizei], requires='OpenGL 3.0') 
-glRenderbufferStorageEXT = _link_function('glRenderbufferStorageEXT', None, [GLenum, GLenum, GLsizei, GLsizei], requires='None') -glRenderbufferStorageMultisample = _link_function('glRenderbufferStorageMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei], requires='OpenGL 3.0') -glResumeTransformFeedback = _link_function('glResumeTransformFeedback', None, [], requires='OpenGL 4.0') -glRotated = _link_function('glRotated', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glRotatef = _link_function('glRotatef', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glSampleCoverage = _link_function('glSampleCoverage', None, [GLfloat, GLboolean], requires='OpenGL 1.3') -glSampleCoverageARB = _link_function('glSampleCoverageARB', None, [GLfloat, GLboolean], requires='None') -glSampleMaski = _link_function('glSampleMaski', None, [GLuint, GLbitfield], requires='OpenGL 3.2') -glSamplerParameterIiv = _link_function('glSamplerParameterIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.3') -glSamplerParameterIuiv = _link_function('glSamplerParameterIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glSamplerParameterf = _link_function('glSamplerParameterf', None, [GLuint, GLenum, GLfloat], requires='OpenGL 3.3') -glSamplerParameterfv = _link_function('glSamplerParameterfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 3.3') -glSamplerParameteri = _link_function('glSamplerParameteri', None, [GLuint, GLenum, GLint], requires='OpenGL 3.3') -glSamplerParameteriv = _link_function('glSamplerParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.3') -glScaled = _link_function('glScaled', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glScalef = _link_function('glScalef', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glScissor = _link_function('glScissor', None, [GLint, GLint, GLsizei, GLsizei], requires='OpenGL 1.0') -glScissorArrayv = _link_function('glScissorArrayv', None, [GLuint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1') -glScissorIndexed = _link_function('glScissorIndexed', None, [GLuint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.1') -glScissorIndexedv = _link_function('glScissorIndexedv', None, [GLuint, POINTER(GLint)], requires='OpenGL 4.1') -glSecondaryColor3b = _link_function('glSecondaryColor3b', None, [GLbyte, GLbyte, GLbyte], requires='OpenGL 1.4') -glSecondaryColor3bv = _link_function('glSecondaryColor3bv', None, [POINTER(GLbyte)], requires='OpenGL 1.4') -glSecondaryColor3d = _link_function('glSecondaryColor3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.4') -glSecondaryColor3dv = _link_function('glSecondaryColor3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.4') -glSecondaryColor3f = _link_function('glSecondaryColor3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.4') -glSecondaryColor3fv = _link_function('glSecondaryColor3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.4') -glSecondaryColor3i = _link_function('glSecondaryColor3i', None, [GLint, GLint, GLint], requires='OpenGL 1.4') -glSecondaryColor3iv = _link_function('glSecondaryColor3iv', None, [POINTER(GLint)], requires='OpenGL 1.4') -glSecondaryColor3s = _link_function('glSecondaryColor3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.4') -glSecondaryColor3sv = _link_function('glSecondaryColor3sv', None, [POINTER(GLshort)], requires='OpenGL 1.4') -glSecondaryColor3ub = _link_function('glSecondaryColor3ub', None, [GLubyte, GLubyte, GLubyte], 
requires='OpenGL 1.4') -glSecondaryColor3ubv = _link_function('glSecondaryColor3ubv', None, [POINTER(GLubyte)], requires='OpenGL 1.4') -glSecondaryColor3ui = _link_function('glSecondaryColor3ui', None, [GLuint, GLuint, GLuint], requires='OpenGL 1.4') -glSecondaryColor3uiv = _link_function('glSecondaryColor3uiv', None, [POINTER(GLuint)], requires='OpenGL 1.4') -glSecondaryColor3us = _link_function('glSecondaryColor3us', None, [GLushort, GLushort, GLushort], requires='OpenGL 1.4') -glSecondaryColor3usv = _link_function('glSecondaryColor3usv', None, [POINTER(GLushort)], requires='OpenGL 1.4') -glSecondaryColorP3ui = _link_function('glSecondaryColorP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glSecondaryColorP3uiv = _link_function('glSecondaryColorP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glSecondaryColorPointer = _link_function('glSecondaryColorPointer', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.4') -glSelectBuffer = _link_function('glSelectBuffer', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.0') -glShadeModel = _link_function('glShadeModel', None, [GLenum], requires='OpenGL 1.0') -glShaderBinary = _link_function('glShaderBinary', None, [GLsizei, POINTER(GLuint), GLenum, POINTER(GLvoid), GLsizei], requires='OpenGL 4.1') -glShaderSource = _link_function('glShaderSource', None, [GLuint, GLsizei, POINTER(POINTER(GLchar)), POINTER(GLint)], requires='OpenGL 2.0') -glShaderStorageBlockBinding = _link_function('glShaderStorageBlockBinding', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.3') -glSpecializeShader = _link_function('glSpecializeShader', None, [GLuint, POINTER(GLchar), GLuint, POINTER(GLuint), POINTER(GLuint)], requires='OpenGL 4.6') -glStencilFunc = _link_function('glStencilFunc', None, [GLenum, GLint, GLuint], requires='OpenGL 1.0') -glStencilFuncSeparate = _link_function('glStencilFuncSeparate', None, [GLenum, GLenum, GLint, GLuint], requires='OpenGL 2.0') -glStencilMask = _link_function('glStencilMask', None, [GLuint], requires='OpenGL 1.0') -glStencilMaskSeparate = _link_function('glStencilMaskSeparate', None, [GLenum, GLuint], requires='OpenGL 2.0') -glStencilOp = _link_function('glStencilOp', None, [GLenum, GLenum, GLenum], requires='OpenGL 1.0') -glStencilOpSeparate = _link_function('glStencilOpSeparate', None, [GLenum, GLenum, GLenum, GLenum], requires='OpenGL 2.0') -glTexBuffer = _link_function('glTexBuffer', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.1') -glTexBufferRange = _link_function('glTexBufferRange', None, [GLenum, GLenum, GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.3') -glTexCoord1d = _link_function('glTexCoord1d', None, [GLdouble], requires='OpenGL 1.0') -glTexCoord1dv = _link_function('glTexCoord1dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glTexCoord1f = _link_function('glTexCoord1f', None, [GLfloat], requires='OpenGL 1.0') -glTexCoord1fv = _link_function('glTexCoord1fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glTexCoord1i = _link_function('glTexCoord1i', None, [GLint], requires='OpenGL 1.0') -glTexCoord1iv = _link_function('glTexCoord1iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glTexCoord1s = _link_function('glTexCoord1s', None, [GLshort], requires='OpenGL 1.0') -glTexCoord1sv = _link_function('glTexCoord1sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glTexCoord2d = _link_function('glTexCoord2d', None, [GLdouble, GLdouble], requires='OpenGL 1.0') -glTexCoord2dv = _link_function('glTexCoord2dv', None, [POINTER(GLdouble)], requires='OpenGL 
1.0') -glTexCoord2f = _link_function('glTexCoord2f', None, [GLfloat, GLfloat], requires='OpenGL 1.0') -glTexCoord2fv = _link_function('glTexCoord2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glTexCoord2i = _link_function('glTexCoord2i', None, [GLint, GLint], requires='OpenGL 1.0') -glTexCoord2iv = _link_function('glTexCoord2iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glTexCoord2s = _link_function('glTexCoord2s', None, [GLshort, GLshort], requires='OpenGL 1.0') -glTexCoord2sv = _link_function('glTexCoord2sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glTexCoord3d = _link_function('glTexCoord3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glTexCoord3dv = _link_function('glTexCoord3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glTexCoord3f = _link_function('glTexCoord3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glTexCoord3fv = _link_function('glTexCoord3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glTexCoord3i = _link_function('glTexCoord3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0') -glTexCoord3iv = _link_function('glTexCoord3iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glTexCoord3s = _link_function('glTexCoord3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glTexCoord3sv = _link_function('glTexCoord3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glTexCoord4d = _link_function('glTexCoord4d', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glTexCoord4dv = _link_function('glTexCoord4dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glTexCoord4f = _link_function('glTexCoord4f', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glTexCoord4fv = _link_function('glTexCoord4fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glTexCoord4i = _link_function('glTexCoord4i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0') -glTexCoord4iv = _link_function('glTexCoord4iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glTexCoord4s = _link_function('glTexCoord4s', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glTexCoord4sv = _link_function('glTexCoord4sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glTexCoordP1ui = _link_function('glTexCoordP1ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glTexCoordP1uiv = _link_function('glTexCoordP1uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glTexCoordP2ui = _link_function('glTexCoordP2ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glTexCoordP2uiv = _link_function('glTexCoordP2uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glTexCoordP3ui = _link_function('glTexCoordP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glTexCoordP3uiv = _link_function('glTexCoordP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glTexCoordP4ui = _link_function('glTexCoordP4ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glTexCoordP4uiv = _link_function('glTexCoordP4uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glTexCoordPointer = _link_function('glTexCoordPointer', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1') -glTexEnvf = _link_function('glTexEnvf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0') -glTexEnvfv = _link_function('glTexEnvfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glTexEnvi = _link_function('glTexEnvi', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0') -glTexEnviv = _link_function('glTexEnviv', None, [GLenum, 
GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glTexGend = _link_function('glTexGend', None, [GLenum, GLenum, GLdouble], requires='OpenGL 1.0') -glTexGendv = _link_function('glTexGendv', None, [GLenum, GLenum, POINTER(GLdouble)], requires='OpenGL 1.0') -glTexGenf = _link_function('glTexGenf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0') -glTexGenfv = _link_function('glTexGenfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glTexGeni = _link_function('glTexGeni', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0') -glTexGeniv = _link_function('glTexGeniv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glTexImage1D = _link_function('glTexImage1D', None, [GLenum, GLint, GLint, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0') -glTexImage2D = _link_function('glTexImage2D', None, [GLenum, GLint, GLint, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0') -glTexImage2DMultisample = _link_function('glTexImage2DMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLboolean], requires='OpenGL 3.2') -glTexImage3D = _link_function('glTexImage3D', None, [GLenum, GLint, GLint, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.2') -glTexImage3DMultisample = _link_function('glTexImage3DMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLsizei, GLboolean], requires='OpenGL 3.2') -glTexParameterIiv = _link_function('glTexParameterIiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 3.0') -glTexParameterIuiv = _link_function('glTexParameterIuiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.0') -glTexParameterf = _link_function('glTexParameterf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0') -glTexParameterfv = _link_function('glTexParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0') -glTexParameteri = _link_function('glTexParameteri', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0') -glTexParameteriv = _link_function('glTexParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0') -glTexStorage1D = _link_function('glTexStorage1D', None, [GLenum, GLsizei, GLenum, GLsizei], requires='OpenGL 4.2') -glTexStorage2D = _link_function('glTexStorage2D', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei], requires='OpenGL 4.2') -glTexStorage2DMultisample = _link_function('glTexStorage2DMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLboolean], requires='OpenGL 4.3') -glTexStorage3D = _link_function('glTexStorage3D', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLsizei], requires='OpenGL 4.2') -glTexStorage3DMultisample = _link_function('glTexStorage3DMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLsizei, GLboolean], requires='OpenGL 4.3') -glTexSubImage1D = _link_function('glTexSubImage1D', None, [GLenum, GLint, GLint, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.1') -glTexSubImage2D = _link_function('glTexSubImage2D', None, [GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.1') -glTexSubImage3D = _link_function('glTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.2') -glTextureBarrier = _link_function('glTextureBarrier', None, [], requires='OpenGL 4.5') -glTextureBuffer = _link_function('glTextureBuffer', None, [GLuint, GLenum, GLuint], requires='OpenGL 4.5') 
-glTextureBufferRange = _link_function('glTextureBufferRange', None, [GLuint, GLenum, GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.5') -glTextureParameterIiv = _link_function('glTextureParameterIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glTextureParameterIuiv = _link_function('glTextureParameterIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 4.5') -glTextureParameterf = _link_function('glTextureParameterf', None, [GLuint, GLenum, GLfloat], requires='OpenGL 4.5') -glTextureParameterfv = _link_function('glTextureParameterfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 4.5') -glTextureParameteri = _link_function('glTextureParameteri', None, [GLuint, GLenum, GLint], requires='OpenGL 4.5') -glTextureParameteriv = _link_function('glTextureParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5') -glTextureStorage1D = _link_function('glTextureStorage1D', None, [GLuint, GLsizei, GLenum, GLsizei], requires='OpenGL 4.5') -glTextureStorage2D = _link_function('glTextureStorage2D', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei], requires='OpenGL 4.5') -glTextureStorage2DMultisample = _link_function('glTextureStorage2DMultisample', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei, GLboolean], requires='OpenGL 4.5') -glTextureStorage3D = _link_function('glTextureStorage3D', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei, GLsizei], requires='OpenGL 4.5') -glTextureStorage3DMultisample = _link_function('glTextureStorage3DMultisample', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei, GLsizei, GLboolean], requires='OpenGL 4.5') -glTextureSubImage1D = _link_function('glTextureSubImage1D', None, [GLuint, GLint, GLint, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5') -glTextureSubImage2D = _link_function('glTextureSubImage2D', None, [GLuint, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5') -glTextureSubImage3D = _link_function('glTextureSubImage3D', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5') -glTextureView = _link_function('glTextureView', None, [GLuint, GLenum, GLuint, GLenum, GLuint, GLuint, GLuint, GLuint], requires='OpenGL 4.3') -glTransformFeedbackBufferBase = _link_function('glTransformFeedbackBufferBase', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.5') -glTransformFeedbackBufferRange = _link_function('glTransformFeedbackBufferRange', None, [GLuint, GLuint, GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.5') -glTransformFeedbackVaryings = _link_function('glTransformFeedbackVaryings', None, [GLuint, GLsizei, POINTER(POINTER(GLchar)), GLenum], requires='OpenGL 3.0') -glTranslated = _link_function('glTranslated', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glTranslatef = _link_function('glTranslatef', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glUniform1d = _link_function('glUniform1d', None, [GLint, GLdouble], requires='OpenGL 4.0') -glUniform1dv = _link_function('glUniform1dv', None, [GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniform1f = _link_function('glUniform1f', None, [GLint, GLfloat], requires='OpenGL 2.0') -glUniform1fv = _link_function('glUniform1fv', None, [GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 2.0') -glUniform1i = _link_function('glUniform1i', None, [GLint, GLint], requires='OpenGL 2.0') -glUniform1iv = _link_function('glUniform1iv', None, [GLint, GLsizei, POINTER(GLint)], 
requires='OpenGL 2.0') -glUniform1ui = _link_function('glUniform1ui', None, [GLint, GLuint], requires='OpenGL 3.0') -glUniform1uiv = _link_function('glUniform1uiv', None, [GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glUniform2d = _link_function('glUniform2d', None, [GLint, GLdouble, GLdouble], requires='OpenGL 4.0') -glUniform2dv = _link_function('glUniform2dv', None, [GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniform2f = _link_function('glUniform2f', None, [GLint, GLfloat, GLfloat], requires='OpenGL 2.0') -glUniform2fv = _link_function('glUniform2fv', None, [GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 2.0') -glUniform2i = _link_function('glUniform2i', None, [GLint, GLint, GLint], requires='OpenGL 2.0') -glUniform2iv = _link_function('glUniform2iv', None, [GLint, GLsizei, POINTER(GLint)], requires='OpenGL 2.0') -glUniform2ui = _link_function('glUniform2ui', None, [GLint, GLuint, GLuint], requires='OpenGL 3.0') -glUniform2uiv = _link_function('glUniform2uiv', None, [GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glUniform3d = _link_function('glUniform3d', None, [GLint, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.0') -glUniform3dv = _link_function('glUniform3dv', None, [GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniform3f = _link_function('glUniform3f', None, [GLint, GLfloat, GLfloat, GLfloat], requires='OpenGL 2.0') -glUniform3fv = _link_function('glUniform3fv', None, [GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 2.0') -glUniform3i = _link_function('glUniform3i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 2.0') -glUniform3iv = _link_function('glUniform3iv', None, [GLint, GLsizei, POINTER(GLint)], requires='OpenGL 2.0') -glUniform3ui = _link_function('glUniform3ui', None, [GLint, GLuint, GLuint, GLuint], requires='OpenGL 3.0') -glUniform3uiv = _link_function('glUniform3uiv', None, [GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glUniform4d = _link_function('glUniform4d', None, [GLint, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.0') -glUniform4dv = _link_function('glUniform4dv', None, [GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniform4f = _link_function('glUniform4f', None, [GLint, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 2.0') -glUniform4fv = _link_function('glUniform4fv', None, [GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 2.0') -glUniform4i = _link_function('glUniform4i', None, [GLint, GLint, GLint, GLint, GLint], requires='OpenGL 2.0') -glUniform4iv = _link_function('glUniform4iv', None, [GLint, GLsizei, POINTER(GLint)], requires='OpenGL 2.0') -glUniform4ui = _link_function('glUniform4ui', None, [GLint, GLuint, GLuint, GLuint, GLuint], requires='OpenGL 3.0') -glUniform4uiv = _link_function('glUniform4uiv', None, [GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 3.0') -glUniformBlockBinding = _link_function('glUniformBlockBinding', None, [GLuint, GLuint, GLuint], requires='OpenGL 3.1') -glUniformMatrix2dv = _link_function('glUniformMatrix2dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniformMatrix2fv = _link_function('glUniformMatrix2fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.0') -glUniformMatrix2x3dv = _link_function('glUniformMatrix2x3dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniformMatrix2x3fv = _link_function('glUniformMatrix2x3fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1') 
-glUniformMatrix2x4dv = _link_function('glUniformMatrix2x4dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniformMatrix2x4fv = _link_function('glUniformMatrix2x4fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1') -glUniformMatrix3dv = _link_function('glUniformMatrix3dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniformMatrix3fv = _link_function('glUniformMatrix3fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.0') -glUniformMatrix3x2dv = _link_function('glUniformMatrix3x2dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniformMatrix3x2fv = _link_function('glUniformMatrix3x2fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1') -glUniformMatrix3x4dv = _link_function('glUniformMatrix3x4dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniformMatrix3x4fv = _link_function('glUniformMatrix3x4fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1') -glUniformMatrix4dv = _link_function('glUniformMatrix4dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniformMatrix4fv = _link_function('glUniformMatrix4fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.0') -glUniformMatrix4x2dv = _link_function('glUniformMatrix4x2dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniformMatrix4x2fv = _link_function('glUniformMatrix4x2fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1') -glUniformMatrix4x3dv = _link_function('glUniformMatrix4x3dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0') -glUniformMatrix4x3fv = _link_function('glUniformMatrix4x3fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1') -glUniformSubroutinesuiv = _link_function('glUniformSubroutinesuiv', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 4.0') -glUnmapBuffer = _link_function('glUnmapBuffer', GLboolean, [GLenum], requires='OpenGL 1.5') -glUnmapNamedBuffer = _link_function('glUnmapNamedBuffer', GLboolean, [GLuint], requires='OpenGL 4.5') -glUseProgram = _link_function('glUseProgram', None, [GLuint], requires='OpenGL 2.0') -glUseProgramStages = _link_function('glUseProgramStages', None, [GLuint, GLbitfield, GLuint], requires='OpenGL 4.1') -glValidateProgram = _link_function('glValidateProgram', None, [GLuint], requires='OpenGL 2.0') -glValidateProgramPipeline = _link_function('glValidateProgramPipeline', None, [GLuint], requires='OpenGL 4.1') -glVertex2d = _link_function('glVertex2d', None, [GLdouble, GLdouble], requires='OpenGL 1.0') -glVertex2dv = _link_function('glVertex2dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glVertex2f = _link_function('glVertex2f', None, [GLfloat, GLfloat], requires='OpenGL 1.0') -glVertex2fv = _link_function('glVertex2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glVertex2i = _link_function('glVertex2i', None, [GLint, GLint], requires='OpenGL 1.0') -glVertex2iv = _link_function('glVertex2iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glVertex2s = _link_function('glVertex2s', None, [GLshort, GLshort], requires='OpenGL 1.0') -glVertex2sv = _link_function('glVertex2sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glVertex3d = _link_function('glVertex3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glVertex3dv = 
_link_function('glVertex3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glVertex3f = _link_function('glVertex3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glVertex3fv = _link_function('glVertex3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glVertex3i = _link_function('glVertex3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0') -glVertex3iv = _link_function('glVertex3iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glVertex3s = _link_function('glVertex3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glVertex3sv = _link_function('glVertex3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glVertex4d = _link_function('glVertex4d', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0') -glVertex4dv = _link_function('glVertex4dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0') -glVertex4f = _link_function('glVertex4f', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0') -glVertex4fv = _link_function('glVertex4fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0') -glVertex4i = _link_function('glVertex4i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0') -glVertex4iv = _link_function('glVertex4iv', None, [POINTER(GLint)], requires='OpenGL 1.0') -glVertex4s = _link_function('glVertex4s', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0') -glVertex4sv = _link_function('glVertex4sv', None, [POINTER(GLshort)], requires='OpenGL 1.0') -glVertexArrayAttribBinding = _link_function('glVertexArrayAttribBinding', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.5') -glVertexArrayAttribFormat = _link_function('glVertexArrayAttribFormat', None, [GLuint, GLuint, GLint, GLenum, GLboolean, GLuint], requires='OpenGL 4.5') -glVertexArrayAttribIFormat = _link_function('glVertexArrayAttribIFormat', None, [GLuint, GLuint, GLint, GLenum, GLuint], requires='OpenGL 4.5') -glVertexArrayAttribLFormat = _link_function('glVertexArrayAttribLFormat', None, [GLuint, GLuint, GLint, GLenum, GLuint], requires='OpenGL 4.5') -glVertexArrayBindingDivisor = _link_function('glVertexArrayBindingDivisor', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.5') -glVertexArrayElementBuffer = _link_function('glVertexArrayElementBuffer', None, [GLuint, GLuint], requires='OpenGL 4.5') -glVertexArrayVertexBuffer = _link_function('glVertexArrayVertexBuffer', None, [GLuint, GLuint, GLuint, GLintptr, GLsizei], requires='OpenGL 4.5') -glVertexArrayVertexBuffers = _link_function('glVertexArrayVertexBuffers', None, [GLuint, GLuint, GLsizei, POINTER(GLuint), POINTER(GLintptr), POINTER(GLsizei)], requires='OpenGL 4.5') -glVertexAttrib1d = _link_function('glVertexAttrib1d', None, [GLuint, GLdouble], requires='OpenGL 2.0') -glVertexAttrib1dv = _link_function('glVertexAttrib1dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 2.0') -glVertexAttrib1f = _link_function('glVertexAttrib1f', None, [GLuint, GLfloat], requires='OpenGL 2.0') -glVertexAttrib1fv = _link_function('glVertexAttrib1fv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 2.0') -glVertexAttrib1s = _link_function('glVertexAttrib1s', None, [GLuint, GLshort], requires='OpenGL 2.0') -glVertexAttrib1sv = _link_function('glVertexAttrib1sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0') -glVertexAttrib2d = _link_function('glVertexAttrib2d', None, [GLuint, GLdouble, GLdouble], requires='OpenGL 2.0') -glVertexAttrib2dv = _link_function('glVertexAttrib2dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 2.0') -glVertexAttrib2f = 
_link_function('glVertexAttrib2f', None, [GLuint, GLfloat, GLfloat], requires='OpenGL 2.0') -glVertexAttrib2fv = _link_function('glVertexAttrib2fv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 2.0') -glVertexAttrib2s = _link_function('glVertexAttrib2s', None, [GLuint, GLshort, GLshort], requires='OpenGL 2.0') -glVertexAttrib2sv = _link_function('glVertexAttrib2sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0') -glVertexAttrib3d = _link_function('glVertexAttrib3d', None, [GLuint, GLdouble, GLdouble, GLdouble], requires='OpenGL 2.0') -glVertexAttrib3dv = _link_function('glVertexAttrib3dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 2.0') -glVertexAttrib3f = _link_function('glVertexAttrib3f', None, [GLuint, GLfloat, GLfloat, GLfloat], requires='OpenGL 2.0') -glVertexAttrib3fv = _link_function('glVertexAttrib3fv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 2.0') -glVertexAttrib3s = _link_function('glVertexAttrib3s', None, [GLuint, GLshort, GLshort, GLshort], requires='OpenGL 2.0') -glVertexAttrib3sv = _link_function('glVertexAttrib3sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0') -glVertexAttrib4Nbv = _link_function('glVertexAttrib4Nbv', None, [GLuint, POINTER(GLbyte)], requires='OpenGL 2.0') -glVertexAttrib4Niv = _link_function('glVertexAttrib4Niv', None, [GLuint, POINTER(GLint)], requires='OpenGL 2.0') -glVertexAttrib4Nsv = _link_function('glVertexAttrib4Nsv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0') -glVertexAttrib4Nub = _link_function('glVertexAttrib4Nub', None, [GLuint, GLubyte, GLubyte, GLubyte, GLubyte], requires='OpenGL 2.0') -glVertexAttrib4Nubv = _link_function('glVertexAttrib4Nubv', None, [GLuint, POINTER(GLubyte)], requires='OpenGL 2.0') -glVertexAttrib4Nuiv = _link_function('glVertexAttrib4Nuiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 2.0') -glVertexAttrib4Nusv = _link_function('glVertexAttrib4Nusv', None, [GLuint, POINTER(GLushort)], requires='OpenGL 2.0') -glVertexAttrib4bv = _link_function('glVertexAttrib4bv', None, [GLuint, POINTER(GLbyte)], requires='OpenGL 2.0') -glVertexAttrib4d = _link_function('glVertexAttrib4d', None, [GLuint, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 2.0') -glVertexAttrib4dv = _link_function('glVertexAttrib4dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 2.0') -glVertexAttrib4f = _link_function('glVertexAttrib4f', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 2.0') -glVertexAttrib4fv = _link_function('glVertexAttrib4fv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 2.0') -glVertexAttrib4iv = _link_function('glVertexAttrib4iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 2.0') -glVertexAttrib4s = _link_function('glVertexAttrib4s', None, [GLuint, GLshort, GLshort, GLshort, GLshort], requires='OpenGL 2.0') -glVertexAttrib4sv = _link_function('glVertexAttrib4sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0') -glVertexAttrib4ubv = _link_function('glVertexAttrib4ubv', None, [GLuint, POINTER(GLubyte)], requires='OpenGL 2.0') -glVertexAttrib4uiv = _link_function('glVertexAttrib4uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 2.0') -glVertexAttrib4usv = _link_function('glVertexAttrib4usv', None, [GLuint, POINTER(GLushort)], requires='OpenGL 2.0') -glVertexAttribBinding = _link_function('glVertexAttribBinding', None, [GLuint, GLuint], requires='OpenGL 4.3') -glVertexAttribDivisor = _link_function('glVertexAttribDivisor', None, [GLuint, GLuint], requires='OpenGL 3.3') -glVertexAttribFormat = 
_link_function('glVertexAttribFormat', None, [GLuint, GLint, GLenum, GLboolean, GLuint], requires='OpenGL 4.3') -glVertexAttribI1i = _link_function('glVertexAttribI1i', None, [GLuint, GLint], requires='OpenGL 3.0') -glVertexAttribI1iv = _link_function('glVertexAttribI1iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 3.0') -glVertexAttribI1ui = _link_function('glVertexAttribI1ui', None, [GLuint, GLuint], requires='OpenGL 3.0') -glVertexAttribI1uiv = _link_function('glVertexAttribI1uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 3.0') -glVertexAttribI2i = _link_function('glVertexAttribI2i', None, [GLuint, GLint, GLint], requires='OpenGL 3.0') -glVertexAttribI2iv = _link_function('glVertexAttribI2iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 3.0') -glVertexAttribI2ui = _link_function('glVertexAttribI2ui', None, [GLuint, GLuint, GLuint], requires='OpenGL 3.0') -glVertexAttribI2uiv = _link_function('glVertexAttribI2uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 3.0') -glVertexAttribI3i = _link_function('glVertexAttribI3i', None, [GLuint, GLint, GLint, GLint], requires='OpenGL 3.0') -glVertexAttribI3iv = _link_function('glVertexAttribI3iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 3.0') -glVertexAttribI3ui = _link_function('glVertexAttribI3ui', None, [GLuint, GLuint, GLuint, GLuint], requires='OpenGL 3.0') -glVertexAttribI3uiv = _link_function('glVertexAttribI3uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 3.0') -glVertexAttribI4bv = _link_function('glVertexAttribI4bv', None, [GLuint, POINTER(GLbyte)], requires='OpenGL 3.0') -glVertexAttribI4i = _link_function('glVertexAttribI4i', None, [GLuint, GLint, GLint, GLint, GLint], requires='OpenGL 3.0') -glVertexAttribI4iv = _link_function('glVertexAttribI4iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 3.0') -glVertexAttribI4sv = _link_function('glVertexAttribI4sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 3.0') -glVertexAttribI4ubv = _link_function('glVertexAttribI4ubv', None, [GLuint, POINTER(GLubyte)], requires='OpenGL 3.0') -glVertexAttribI4ui = _link_function('glVertexAttribI4ui', None, [GLuint, GLuint, GLuint, GLuint, GLuint], requires='OpenGL 3.0') -glVertexAttribI4uiv = _link_function('glVertexAttribI4uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 3.0') -glVertexAttribI4usv = _link_function('glVertexAttribI4usv', None, [GLuint, POINTER(GLushort)], requires='OpenGL 3.0') -glVertexAttribIFormat = _link_function('glVertexAttribIFormat', None, [GLuint, GLint, GLenum, GLuint], requires='OpenGL 4.3') -glVertexAttribIPointer = _link_function('glVertexAttribIPointer', None, [GLuint, GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 3.0') -glVertexAttribL1d = _link_function('glVertexAttribL1d', None, [GLuint, GLdouble], requires='OpenGL 4.1') -glVertexAttribL1dv = _link_function('glVertexAttribL1dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 4.1') -glVertexAttribL2d = _link_function('glVertexAttribL2d', None, [GLuint, GLdouble, GLdouble], requires='OpenGL 4.1') -glVertexAttribL2dv = _link_function('glVertexAttribL2dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 4.1') -glVertexAttribL3d = _link_function('glVertexAttribL3d', None, [GLuint, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.1') -glVertexAttribL3dv = _link_function('glVertexAttribL3dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 4.1') -glVertexAttribL4d = _link_function('glVertexAttribL4d', None, [GLuint, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.1') 
-glVertexAttribL4dv = _link_function('glVertexAttribL4dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 4.1') -glVertexAttribLFormat = _link_function('glVertexAttribLFormat', None, [GLuint, GLint, GLenum, GLuint], requires='OpenGL 4.3') -glVertexAttribLPointer = _link_function('glVertexAttribLPointer', None, [GLuint, GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.1') -glVertexAttribP1ui = _link_function('glVertexAttribP1ui', None, [GLuint, GLenum, GLboolean, GLuint], requires='OpenGL 3.3') -glVertexAttribP1uiv = _link_function('glVertexAttribP1uiv', None, [GLuint, GLenum, GLboolean, POINTER(GLuint)], requires='OpenGL 3.3') -glVertexAttribP2ui = _link_function('glVertexAttribP2ui', None, [GLuint, GLenum, GLboolean, GLuint], requires='OpenGL 3.3') -glVertexAttribP2uiv = _link_function('glVertexAttribP2uiv', None, [GLuint, GLenum, GLboolean, POINTER(GLuint)], requires='OpenGL 3.3') -glVertexAttribP3ui = _link_function('glVertexAttribP3ui', None, [GLuint, GLenum, GLboolean, GLuint], requires='OpenGL 3.3') -glVertexAttribP3uiv = _link_function('glVertexAttribP3uiv', None, [GLuint, GLenum, GLboolean, POINTER(GLuint)], requires='OpenGL 3.3') -glVertexAttribP4ui = _link_function('glVertexAttribP4ui', None, [GLuint, GLenum, GLboolean, GLuint], requires='OpenGL 3.3') -glVertexAttribP4uiv = _link_function('glVertexAttribP4uiv', None, [GLuint, GLenum, GLboolean, POINTER(GLuint)], requires='OpenGL 3.3') -glVertexAttribPointer = _link_function('glVertexAttribPointer', None, [GLuint, GLint, GLenum, GLboolean, GLsizei, POINTER(GLvoid)], requires='OpenGL 2.0') -glVertexBindingDivisor = _link_function('glVertexBindingDivisor', None, [GLuint, GLuint], requires='OpenGL 4.3') -glVertexP2ui = _link_function('glVertexP2ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glVertexP2uiv = _link_function('glVertexP2uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glVertexP3ui = _link_function('glVertexP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glVertexP3uiv = _link_function('glVertexP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glVertexP4ui = _link_function('glVertexP4ui', None, [GLenum, GLuint], requires='OpenGL 3.3') -glVertexP4uiv = _link_function('glVertexP4uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3') -glVertexPointer = _link_function('glVertexPointer', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1') -glViewport = _link_function('glViewport', None, [GLint, GLint, GLsizei, GLsizei], requires='OpenGL 1.0') -glViewportArrayv = _link_function('glViewportArrayv', None, [GLuint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1') -glViewportIndexedf = _link_function('glViewportIndexedf', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 4.1') -glViewportIndexedfv = _link_function('glViewportIndexedfv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 4.1') -glWaitSync = _link_function('glWaitSync', None, [GLsync, GLbitfield, GLuint64], requires='OpenGL 3.2') -glWindowPos2d = _link_function('glWindowPos2d', None, [GLdouble, GLdouble], requires='OpenGL 1.4') -glWindowPos2dv = _link_function('glWindowPos2dv', None, [POINTER(GLdouble)], requires='OpenGL 1.4') -glWindowPos2f = _link_function('glWindowPos2f', None, [GLfloat, GLfloat], requires='OpenGL 1.4') -glWindowPos2fv = _link_function('glWindowPos2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.4') -glWindowPos2i = _link_function('glWindowPos2i', None, [GLint, GLint], requires='OpenGL 1.4') -glWindowPos2iv = _link_function('glWindowPos2iv', 
None, [POINTER(GLint)], requires='OpenGL 1.4') -glWindowPos2s = _link_function('glWindowPos2s', None, [GLshort, GLshort], requires='OpenGL 1.4') -glWindowPos2sv = _link_function('glWindowPos2sv', None, [POINTER(GLshort)], requires='OpenGL 1.4') -glWindowPos3d = _link_function('glWindowPos3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.4') -glWindowPos3dv = _link_function('glWindowPos3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.4') -glWindowPos3f = _link_function('glWindowPos3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.4') -glWindowPos3fv = _link_function('glWindowPos3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.4') -glWindowPos3i = _link_function('glWindowPos3i', None, [GLint, GLint, GLint], requires='OpenGL 1.4') -glWindowPos3iv = _link_function('glWindowPos3iv', None, [POINTER(GLint)], requires='OpenGL 1.4') -glWindowPos3s = _link_function('glWindowPos3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.4') -glWindowPos3sv = _link_function('glWindowPos3sv', None, [POINTER(GLshort)], requires='OpenGL 1.4') - - -__all__ = [ - 'GLenum', - 'GLboolean', - 'GLbitfield', - 'GLvoid', - 'GLbyte', - 'GLubyte', - 'GLshort', - 'GLushort', - 'GLint', - 'GLuint', - 'GLclampx', - 'GLsizei', - 'GLfloat', - 'GLclampf', - 'GLdouble', - 'GLclampd', - 'GLchar', - 'GLintptr', - 'GLsizeiptr', - 'GLint64', - 'GLuint64', - 'GLsync', - 'GLDEBUGPROC', - 'GL_DEPTH_BUFFER_BIT', - 'GL_STENCIL_BUFFER_BIT', - 'GL_COLOR_BUFFER_BIT', - 'GL_FALSE', - 'GL_TRUE', - 'GL_POINTS', - 'GL_LINES', - 'GL_LINE_LOOP', - 'GL_LINE_STRIP', - 'GL_TRIANGLES', - 'GL_TRIANGLE_STRIP', - 'GL_TRIANGLE_FAN', - 'GL_QUADS', - 'GL_NEVER', - 'GL_LESS', - 'GL_EQUAL', - 'GL_LEQUAL', - 'GL_GREATER', - 'GL_NOTEQUAL', - 'GL_GEQUAL', - 'GL_ALWAYS', - 'GL_ZERO', - 'GL_ONE', - 'GL_SRC_COLOR', - 'GL_ONE_MINUS_SRC_COLOR', - 'GL_SRC_ALPHA', - 'GL_ONE_MINUS_SRC_ALPHA', - 'GL_DST_ALPHA', - 'GL_ONE_MINUS_DST_ALPHA', - 'GL_DST_COLOR', - 'GL_ONE_MINUS_DST_COLOR', - 'GL_SRC_ALPHA_SATURATE', - 'GL_NONE', - 'GL_FRONT_LEFT', - 'GL_FRONT_RIGHT', - 'GL_BACK_LEFT', - 'GL_BACK_RIGHT', - 'GL_FRONT', - 'GL_BACK', - 'GL_LEFT', - 'GL_RIGHT', - 'GL_FRONT_AND_BACK', - 'GL_NO_ERROR', - 'GL_INVALID_ENUM', - 'GL_INVALID_VALUE', - 'GL_INVALID_OPERATION', - 'GL_OUT_OF_MEMORY', - 'GL_CW', - 'GL_CCW', - 'GL_POINT_SIZE', - 'GL_POINT_SIZE_RANGE', - 'GL_POINT_SIZE_GRANULARITY', - 'GL_LINE_SMOOTH', - 'GL_LINE_WIDTH', - 'GL_LINE_WIDTH_RANGE', - 'GL_LINE_WIDTH_GRANULARITY', - 'GL_POLYGON_MODE', - 'GL_POLYGON_SMOOTH', - 'GL_CULL_FACE', - 'GL_CULL_FACE_MODE', - 'GL_FRONT_FACE', - 'GL_DEPTH_RANGE', - 'GL_DEPTH_TEST', - 'GL_DEPTH_WRITEMASK', - 'GL_DEPTH_CLEAR_VALUE', - 'GL_DEPTH_FUNC', - 'GL_STENCIL_TEST', - 'GL_STENCIL_CLEAR_VALUE', - 'GL_STENCIL_FUNC', - 'GL_STENCIL_VALUE_MASK', - 'GL_STENCIL_FAIL', - 'GL_STENCIL_PASS_DEPTH_FAIL', - 'GL_STENCIL_PASS_DEPTH_PASS', - 'GL_STENCIL_REF', - 'GL_STENCIL_WRITEMASK', - 'GL_VIEWPORT', - 'GL_DITHER', - 'GL_BLEND_DST', - 'GL_BLEND_SRC', - 'GL_BLEND', - 'GL_LOGIC_OP_MODE', - 'GL_DRAW_BUFFER', - 'GL_READ_BUFFER', - 'GL_SCISSOR_BOX', - 'GL_SCISSOR_TEST', - 'GL_COLOR_CLEAR_VALUE', - 'GL_COLOR_WRITEMASK', - 'GL_DOUBLEBUFFER', - 'GL_STEREO', - 'GL_LINE_SMOOTH_HINT', - 'GL_POLYGON_SMOOTH_HINT', - 'GL_UNPACK_SWAP_BYTES', - 'GL_UNPACK_LSB_FIRST', - 'GL_UNPACK_ROW_LENGTH', - 'GL_UNPACK_SKIP_ROWS', - 'GL_UNPACK_SKIP_PIXELS', - 'GL_UNPACK_ALIGNMENT', - 'GL_PACK_SWAP_BYTES', - 'GL_PACK_LSB_FIRST', - 'GL_PACK_ROW_LENGTH', - 'GL_PACK_SKIP_ROWS', - 'GL_PACK_SKIP_PIXELS', - 'GL_PACK_ALIGNMENT', - 'GL_MAX_TEXTURE_SIZE', - 
'GL_MAX_VIEWPORT_DIMS', - 'GL_SUBPIXEL_BITS', - 'GL_TEXTURE_1D', - 'GL_TEXTURE_2D', - 'GL_TEXTURE_WIDTH', - 'GL_TEXTURE_HEIGHT', - 'GL_TEXTURE_BORDER_COLOR', - 'GL_DONT_CARE', - 'GL_FASTEST', - 'GL_NICEST', - 'GL_BYTE', - 'GL_UNSIGNED_BYTE', - 'GL_SHORT', - 'GL_UNSIGNED_SHORT', - 'GL_INT', - 'GL_UNSIGNED_INT', - 'GL_FLOAT', - 'GL_STACK_OVERFLOW', - 'GL_STACK_UNDERFLOW', - 'GL_CLEAR', - 'GL_AND', - 'GL_AND_REVERSE', - 'GL_COPY', - 'GL_AND_INVERTED', - 'GL_NOOP', - 'GL_XOR', - 'GL_OR', - 'GL_NOR', - 'GL_EQUIV', - 'GL_INVERT', - 'GL_OR_REVERSE', - 'GL_COPY_INVERTED', - 'GL_OR_INVERTED', - 'GL_NAND', - 'GL_SET', - 'GL_TEXTURE', - 'GL_COLOR', - 'GL_DEPTH', - 'GL_STENCIL', - 'GL_STENCIL_INDEX', - 'GL_DEPTH_COMPONENT', - 'GL_RED', - 'GL_GREEN', - 'GL_BLUE', - 'GL_ALPHA', - 'GL_RGB', - 'GL_RGBA', - 'GL_POINT', - 'GL_LINE', - 'GL_FILL', - 'GL_KEEP', - 'GL_REPLACE', - 'GL_INCR', - 'GL_DECR', - 'GL_VENDOR', - 'GL_RENDERER', - 'GL_VERSION', - 'GL_EXTENSIONS', - 'GL_NEAREST', - 'GL_LINEAR', - 'GL_NEAREST_MIPMAP_NEAREST', - 'GL_LINEAR_MIPMAP_NEAREST', - 'GL_NEAREST_MIPMAP_LINEAR', - 'GL_LINEAR_MIPMAP_LINEAR', - 'GL_TEXTURE_MAG_FILTER', - 'GL_TEXTURE_MIN_FILTER', - 'GL_TEXTURE_WRAP_S', - 'GL_TEXTURE_WRAP_T', - 'GL_REPEAT', - 'GL_CURRENT_BIT', - 'GL_POINT_BIT', - 'GL_LINE_BIT', - 'GL_POLYGON_BIT', - 'GL_POLYGON_STIPPLE_BIT', - 'GL_PIXEL_MODE_BIT', - 'GL_LIGHTING_BIT', - 'GL_FOG_BIT', - 'GL_ACCUM_BUFFER_BIT', - 'GL_VIEWPORT_BIT', - 'GL_TRANSFORM_BIT', - 'GL_ENABLE_BIT', - 'GL_HINT_BIT', - 'GL_EVAL_BIT', - 'GL_LIST_BIT', - 'GL_TEXTURE_BIT', - 'GL_SCISSOR_BIT', - 'GL_ALL_ATTRIB_BITS', - 'GL_QUAD_STRIP', - 'GL_POLYGON', - 'GL_ACCUM', - 'GL_LOAD', - 'GL_RETURN', - 'GL_MULT', - 'GL_ADD', - 'GL_AUX0', - 'GL_AUX1', - 'GL_AUX2', - 'GL_AUX3', - 'GL_2D', - 'GL_3D', - 'GL_3D_COLOR', - 'GL_3D_COLOR_TEXTURE', - 'GL_4D_COLOR_TEXTURE', - 'GL_PASS_THROUGH_TOKEN', - 'GL_POINT_TOKEN', - 'GL_LINE_TOKEN', - 'GL_POLYGON_TOKEN', - 'GL_BITMAP_TOKEN', - 'GL_DRAW_PIXEL_TOKEN', - 'GL_COPY_PIXEL_TOKEN', - 'GL_LINE_RESET_TOKEN', - 'GL_EXP', - 'GL_EXP2', - 'GL_COEFF', - 'GL_ORDER', - 'GL_DOMAIN', - 'GL_PIXEL_MAP_I_TO_I', - 'GL_PIXEL_MAP_S_TO_S', - 'GL_PIXEL_MAP_I_TO_R', - 'GL_PIXEL_MAP_I_TO_G', - 'GL_PIXEL_MAP_I_TO_B', - 'GL_PIXEL_MAP_I_TO_A', - 'GL_PIXEL_MAP_R_TO_R', - 'GL_PIXEL_MAP_G_TO_G', - 'GL_PIXEL_MAP_B_TO_B', - 'GL_PIXEL_MAP_A_TO_A', - 'GL_CURRENT_COLOR', - 'GL_CURRENT_INDEX', - 'GL_CURRENT_NORMAL', - 'GL_CURRENT_TEXTURE_COORDS', - 'GL_CURRENT_RASTER_COLOR', - 'GL_CURRENT_RASTER_INDEX', - 'GL_CURRENT_RASTER_TEXTURE_COORDS', - 'GL_CURRENT_RASTER_POSITION', - 'GL_CURRENT_RASTER_POSITION_VALID', - 'GL_CURRENT_RASTER_DISTANCE', - 'GL_POINT_SMOOTH', - 'GL_LINE_STIPPLE', - 'GL_LINE_STIPPLE_PATTERN', - 'GL_LINE_STIPPLE_REPEAT', - 'GL_LIST_MODE', - 'GL_MAX_LIST_NESTING', - 'GL_LIST_BASE', - 'GL_LIST_INDEX', - 'GL_POLYGON_STIPPLE', - 'GL_EDGE_FLAG', - 'GL_LIGHTING', - 'GL_LIGHT_MODEL_LOCAL_VIEWER', - 'GL_LIGHT_MODEL_TWO_SIDE', - 'GL_LIGHT_MODEL_AMBIENT', - 'GL_SHADE_MODEL', - 'GL_COLOR_MATERIAL_FACE', - 'GL_COLOR_MATERIAL_PARAMETER', - 'GL_COLOR_MATERIAL', - 'GL_FOG', - 'GL_FOG_INDEX', - 'GL_FOG_DENSITY', - 'GL_FOG_START', - 'GL_FOG_END', - 'GL_FOG_MODE', - 'GL_FOG_COLOR', - 'GL_ACCUM_CLEAR_VALUE', - 'GL_MATRIX_MODE', - 'GL_NORMALIZE', - 'GL_MODELVIEW_STACK_DEPTH', - 'GL_PROJECTION_STACK_DEPTH', - 'GL_TEXTURE_STACK_DEPTH', - 'GL_MODELVIEW_MATRIX', - 'GL_PROJECTION_MATRIX', - 'GL_TEXTURE_MATRIX', - 'GL_ATTRIB_STACK_DEPTH', - 'GL_ALPHA_TEST', - 'GL_ALPHA_TEST_FUNC', - 'GL_ALPHA_TEST_REF', - 'GL_LOGIC_OP', - 'GL_AUX_BUFFERS', - 
'GL_INDEX_CLEAR_VALUE', - 'GL_INDEX_WRITEMASK', - 'GL_INDEX_MODE', - 'GL_RGBA_MODE', - 'GL_RENDER_MODE', - 'GL_PERSPECTIVE_CORRECTION_HINT', - 'GL_POINT_SMOOTH_HINT', - 'GL_FOG_HINT', - 'GL_TEXTURE_GEN_S', - 'GL_TEXTURE_GEN_T', - 'GL_TEXTURE_GEN_R', - 'GL_TEXTURE_GEN_Q', - 'GL_PIXEL_MAP_I_TO_I_SIZE', - 'GL_PIXEL_MAP_S_TO_S_SIZE', - 'GL_PIXEL_MAP_I_TO_R_SIZE', - 'GL_PIXEL_MAP_I_TO_G_SIZE', - 'GL_PIXEL_MAP_I_TO_B_SIZE', - 'GL_PIXEL_MAP_I_TO_A_SIZE', - 'GL_PIXEL_MAP_R_TO_R_SIZE', - 'GL_PIXEL_MAP_G_TO_G_SIZE', - 'GL_PIXEL_MAP_B_TO_B_SIZE', - 'GL_PIXEL_MAP_A_TO_A_SIZE', - 'GL_MAP_COLOR', - 'GL_MAP_STENCIL', - 'GL_INDEX_SHIFT', - 'GL_INDEX_OFFSET', - 'GL_RED_SCALE', - 'GL_RED_BIAS', - 'GL_ZOOM_X', - 'GL_ZOOM_Y', - 'GL_GREEN_SCALE', - 'GL_GREEN_BIAS', - 'GL_BLUE_SCALE', - 'GL_BLUE_BIAS', - 'GL_ALPHA_SCALE', - 'GL_ALPHA_BIAS', - 'GL_DEPTH_SCALE', - 'GL_DEPTH_BIAS', - 'GL_MAX_EVAL_ORDER', - 'GL_MAX_LIGHTS', - 'GL_MAX_CLIP_PLANES', - 'GL_MAX_PIXEL_MAP_TABLE', - 'GL_MAX_ATTRIB_STACK_DEPTH', - 'GL_MAX_MODELVIEW_STACK_DEPTH', - 'GL_MAX_NAME_STACK_DEPTH', - 'GL_MAX_PROJECTION_STACK_DEPTH', - 'GL_MAX_TEXTURE_STACK_DEPTH', - 'GL_INDEX_BITS', - 'GL_RED_BITS', - 'GL_GREEN_BITS', - 'GL_BLUE_BITS', - 'GL_ALPHA_BITS', - 'GL_DEPTH_BITS', - 'GL_STENCIL_BITS', - 'GL_ACCUM_RED_BITS', - 'GL_ACCUM_GREEN_BITS', - 'GL_ACCUM_BLUE_BITS', - 'GL_ACCUM_ALPHA_BITS', - 'GL_NAME_STACK_DEPTH', - 'GL_AUTO_NORMAL', - 'GL_MAP1_COLOR_4', - 'GL_MAP1_INDEX', - 'GL_MAP1_NORMAL', - 'GL_MAP1_TEXTURE_COORD_1', - 'GL_MAP1_TEXTURE_COORD_2', - 'GL_MAP1_TEXTURE_COORD_3', - 'GL_MAP1_TEXTURE_COORD_4', - 'GL_MAP1_VERTEX_3', - 'GL_MAP1_VERTEX_4', - 'GL_MAP2_COLOR_4', - 'GL_MAP2_INDEX', - 'GL_MAP2_NORMAL', - 'GL_MAP2_TEXTURE_COORD_1', - 'GL_MAP2_TEXTURE_COORD_2', - 'GL_MAP2_TEXTURE_COORD_3', - 'GL_MAP2_TEXTURE_COORD_4', - 'GL_MAP2_VERTEX_3', - 'GL_MAP2_VERTEX_4', - 'GL_MAP1_GRID_DOMAIN', - 'GL_MAP1_GRID_SEGMENTS', - 'GL_MAP2_GRID_DOMAIN', - 'GL_MAP2_GRID_SEGMENTS', - 'GL_TEXTURE_COMPONENTS', - 'GL_TEXTURE_BORDER', - 'GL_AMBIENT', - 'GL_DIFFUSE', - 'GL_SPECULAR', - 'GL_POSITION', - 'GL_SPOT_DIRECTION', - 'GL_SPOT_EXPONENT', - 'GL_SPOT_CUTOFF', - 'GL_CONSTANT_ATTENUATION', - 'GL_LINEAR_ATTENUATION', - 'GL_QUADRATIC_ATTENUATION', - 'GL_COMPILE', - 'GL_COMPILE_AND_EXECUTE', - 'GL_2_BYTES', - 'GL_3_BYTES', - 'GL_4_BYTES', - 'GL_EMISSION', - 'GL_SHININESS', - 'GL_AMBIENT_AND_DIFFUSE', - 'GL_COLOR_INDEXES', - 'GL_MODELVIEW', - 'GL_PROJECTION', - 'GL_COLOR_INDEX', - 'GL_LUMINANCE', - 'GL_LUMINANCE_ALPHA', - 'GL_BITMAP', - 'GL_RENDER', - 'GL_FEEDBACK', - 'GL_SELECT', - 'GL_FLAT', - 'GL_SMOOTH', - 'GL_S', - 'GL_T', - 'GL_R', - 'GL_Q', - 'GL_MODULATE', - 'GL_DECAL', - 'GL_TEXTURE_ENV_MODE', - 'GL_TEXTURE_ENV_COLOR', - 'GL_TEXTURE_ENV', - 'GL_EYE_LINEAR', - 'GL_OBJECT_LINEAR', - 'GL_SPHERE_MAP', - 'GL_TEXTURE_GEN_MODE', - 'GL_OBJECT_PLANE', - 'GL_EYE_PLANE', - 'GL_CLAMP', - 'GL_CLIP_PLANE0', - 'GL_CLIP_PLANE1', - 'GL_CLIP_PLANE2', - 'GL_CLIP_PLANE3', - 'GL_CLIP_PLANE4', - 'GL_CLIP_PLANE5', - 'GL_LIGHT0', - 'GL_LIGHT1', - 'GL_LIGHT2', - 'GL_LIGHT3', - 'GL_LIGHT4', - 'GL_LIGHT5', - 'GL_LIGHT6', - 'GL_LIGHT7', - 'GL_COLOR_LOGIC_OP', - 'GL_POLYGON_OFFSET_UNITS', - 'GL_POLYGON_OFFSET_POINT', - 'GL_POLYGON_OFFSET_LINE', - 'GL_POLYGON_OFFSET_FILL', - 'GL_POLYGON_OFFSET_FACTOR', - 'GL_TEXTURE_BINDING_1D', - 'GL_TEXTURE_BINDING_2D', - 'GL_TEXTURE_INTERNAL_FORMAT', - 'GL_TEXTURE_RED_SIZE', - 'GL_TEXTURE_GREEN_SIZE', - 'GL_TEXTURE_BLUE_SIZE', - 'GL_TEXTURE_ALPHA_SIZE', - 'GL_DOUBLE', - 'GL_PROXY_TEXTURE_1D', - 'GL_PROXY_TEXTURE_2D', - 'GL_R3_G3_B2', - 'GL_RGB4', - 
'GL_RGB5', - 'GL_RGB8', - 'GL_RGB10', - 'GL_RGB12', - 'GL_RGB16', - 'GL_RGBA2', - 'GL_RGBA4', - 'GL_RGB5_A1', - 'GL_RGBA8', - 'GL_RGB10_A2', - 'GL_RGBA12', - 'GL_RGBA16', - 'GL_CLIENT_PIXEL_STORE_BIT', - 'GL_CLIENT_VERTEX_ARRAY_BIT', - 'GL_CLIENT_ALL_ATTRIB_BITS', - 'GL_VERTEX_ARRAY_POINTER', - 'GL_NORMAL_ARRAY_POINTER', - 'GL_COLOR_ARRAY_POINTER', - 'GL_INDEX_ARRAY_POINTER', - 'GL_TEXTURE_COORD_ARRAY_POINTER', - 'GL_EDGE_FLAG_ARRAY_POINTER', - 'GL_FEEDBACK_BUFFER_POINTER', - 'GL_SELECTION_BUFFER_POINTER', - 'GL_CLIENT_ATTRIB_STACK_DEPTH', - 'GL_INDEX_LOGIC_OP', - 'GL_MAX_CLIENT_ATTRIB_STACK_DEPTH', - 'GL_FEEDBACK_BUFFER_SIZE', - 'GL_FEEDBACK_BUFFER_TYPE', - 'GL_SELECTION_BUFFER_SIZE', - 'GL_VERTEX_ARRAY', - 'GL_NORMAL_ARRAY', - 'GL_COLOR_ARRAY', - 'GL_INDEX_ARRAY', - 'GL_TEXTURE_COORD_ARRAY', - 'GL_EDGE_FLAG_ARRAY', - 'GL_VERTEX_ARRAY_SIZE', - 'GL_VERTEX_ARRAY_TYPE', - 'GL_VERTEX_ARRAY_STRIDE', - 'GL_NORMAL_ARRAY_TYPE', - 'GL_NORMAL_ARRAY_STRIDE', - 'GL_COLOR_ARRAY_SIZE', - 'GL_COLOR_ARRAY_TYPE', - 'GL_COLOR_ARRAY_STRIDE', - 'GL_INDEX_ARRAY_TYPE', - 'GL_INDEX_ARRAY_STRIDE', - 'GL_TEXTURE_COORD_ARRAY_SIZE', - 'GL_TEXTURE_COORD_ARRAY_TYPE', - 'GL_TEXTURE_COORD_ARRAY_STRIDE', - 'GL_EDGE_FLAG_ARRAY_STRIDE', - 'GL_TEXTURE_LUMINANCE_SIZE', - 'GL_TEXTURE_INTENSITY_SIZE', - 'GL_TEXTURE_PRIORITY', - 'GL_TEXTURE_RESIDENT', - 'GL_ALPHA4', - 'GL_ALPHA8', - 'GL_ALPHA12', - 'GL_ALPHA16', - 'GL_LUMINANCE4', - 'GL_LUMINANCE8', - 'GL_LUMINANCE12', - 'GL_LUMINANCE16', - 'GL_LUMINANCE4_ALPHA4', - 'GL_LUMINANCE6_ALPHA2', - 'GL_LUMINANCE8_ALPHA8', - 'GL_LUMINANCE12_ALPHA4', - 'GL_LUMINANCE12_ALPHA12', - 'GL_LUMINANCE16_ALPHA16', - 'GL_INTENSITY', - 'GL_INTENSITY4', - 'GL_INTENSITY8', - 'GL_INTENSITY12', - 'GL_INTENSITY16', - 'GL_V2F', - 'GL_V3F', - 'GL_C4UB_V2F', - 'GL_C4UB_V3F', - 'GL_C3F_V3F', - 'GL_N3F_V3F', - 'GL_C4F_N3F_V3F', - 'GL_T2F_V3F', - 'GL_T4F_V4F', - 'GL_T2F_C4UB_V3F', - 'GL_T2F_C3F_V3F', - 'GL_T2F_N3F_V3F', - 'GL_T2F_C4F_N3F_V3F', - 'GL_T4F_C4F_N3F_V4F', - 'GL_UNSIGNED_BYTE_3_3_2', - 'GL_UNSIGNED_SHORT_4_4_4_4', - 'GL_UNSIGNED_SHORT_5_5_5_1', - 'GL_UNSIGNED_INT_8_8_8_8', - 'GL_UNSIGNED_INT_10_10_10_2', - 'GL_TEXTURE_BINDING_3D', - 'GL_PACK_SKIP_IMAGES', - 'GL_PACK_IMAGE_HEIGHT', - 'GL_UNPACK_SKIP_IMAGES', - 'GL_UNPACK_IMAGE_HEIGHT', - 'GL_TEXTURE_3D', - 'GL_PROXY_TEXTURE_3D', - 'GL_TEXTURE_DEPTH', - 'GL_TEXTURE_WRAP_R', - 'GL_MAX_3D_TEXTURE_SIZE', - 'GL_UNSIGNED_BYTE_2_3_3_REV', - 'GL_UNSIGNED_SHORT_5_6_5', - 'GL_UNSIGNED_SHORT_5_6_5_REV', - 'GL_UNSIGNED_SHORT_4_4_4_4_REV', - 'GL_UNSIGNED_SHORT_1_5_5_5_REV', - 'GL_UNSIGNED_INT_8_8_8_8_REV', - 'GL_UNSIGNED_INT_2_10_10_10_REV', - 'GL_BGR', - 'GL_BGRA', - 'GL_MAX_ELEMENTS_VERTICES', - 'GL_MAX_ELEMENTS_INDICES', - 'GL_CLAMP_TO_EDGE', - 'GL_TEXTURE_MIN_LOD', - 'GL_TEXTURE_MAX_LOD', - 'GL_TEXTURE_BASE_LEVEL', - 'GL_TEXTURE_MAX_LEVEL', - 'GL_SMOOTH_POINT_SIZE_RANGE', - 'GL_SMOOTH_POINT_SIZE_GRANULARITY', - 'GL_SMOOTH_LINE_WIDTH_RANGE', - 'GL_SMOOTH_LINE_WIDTH_GRANULARITY', - 'GL_ALIASED_LINE_WIDTH_RANGE', - 'GL_RESCALE_NORMAL', - 'GL_LIGHT_MODEL_COLOR_CONTROL', - 'GL_SINGLE_COLOR', - 'GL_SEPARATE_SPECULAR_COLOR', - 'GL_ALIASED_POINT_SIZE_RANGE', - 'GL_TEXTURE0', - 'GL_TEXTURE1', - 'GL_TEXTURE2', - 'GL_TEXTURE3', - 'GL_TEXTURE4', - 'GL_TEXTURE5', - 'GL_TEXTURE6', - 'GL_TEXTURE7', - 'GL_TEXTURE8', - 'GL_TEXTURE9', - 'GL_TEXTURE10', - 'GL_TEXTURE11', - 'GL_TEXTURE12', - 'GL_TEXTURE13', - 'GL_TEXTURE14', - 'GL_TEXTURE15', - 'GL_TEXTURE16', - 'GL_TEXTURE17', - 'GL_TEXTURE18', - 'GL_TEXTURE19', - 'GL_TEXTURE20', - 'GL_TEXTURE21', - 'GL_TEXTURE22', - 
'GL_TEXTURE23', - 'GL_TEXTURE24', - 'GL_TEXTURE25', - 'GL_TEXTURE26', - 'GL_TEXTURE27', - 'GL_TEXTURE28', - 'GL_TEXTURE29', - 'GL_TEXTURE30', - 'GL_TEXTURE31', - 'GL_ACTIVE_TEXTURE', - 'GL_MULTISAMPLE', - 'GL_SAMPLE_ALPHA_TO_COVERAGE', - 'GL_SAMPLE_ALPHA_TO_ONE', - 'GL_SAMPLE_COVERAGE', - 'GL_SAMPLE_BUFFERS', - 'GL_SAMPLES', - 'GL_SAMPLE_COVERAGE_VALUE', - 'GL_SAMPLE_COVERAGE_INVERT', - 'GL_TEXTURE_CUBE_MAP', - 'GL_TEXTURE_BINDING_CUBE_MAP', - 'GL_TEXTURE_CUBE_MAP_POSITIVE_X', - 'GL_TEXTURE_CUBE_MAP_NEGATIVE_X', - 'GL_TEXTURE_CUBE_MAP_POSITIVE_Y', - 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y', - 'GL_TEXTURE_CUBE_MAP_POSITIVE_Z', - 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z', - 'GL_PROXY_TEXTURE_CUBE_MAP', - 'GL_MAX_CUBE_MAP_TEXTURE_SIZE', - 'GL_COMPRESSED_RGB', - 'GL_COMPRESSED_RGBA', - 'GL_TEXTURE_COMPRESSION_HINT', - 'GL_TEXTURE_COMPRESSED_IMAGE_SIZE', - 'GL_TEXTURE_COMPRESSED', - 'GL_NUM_COMPRESSED_TEXTURE_FORMATS', - 'GL_COMPRESSED_TEXTURE_FORMATS', - 'GL_CLAMP_TO_BORDER', - 'GL_CLIENT_ACTIVE_TEXTURE', - 'GL_MAX_TEXTURE_UNITS', - 'GL_TRANSPOSE_MODELVIEW_MATRIX', - 'GL_TRANSPOSE_PROJECTION_MATRIX', - 'GL_TRANSPOSE_TEXTURE_MATRIX', - 'GL_TRANSPOSE_COLOR_MATRIX', - 'GL_MULTISAMPLE_BIT', - 'GL_NORMAL_MAP', - 'GL_REFLECTION_MAP', - 'GL_COMPRESSED_ALPHA', - 'GL_COMPRESSED_LUMINANCE', - 'GL_COMPRESSED_LUMINANCE_ALPHA', - 'GL_COMPRESSED_INTENSITY', - 'GL_COMBINE', - 'GL_COMBINE_RGB', - 'GL_COMBINE_ALPHA', - 'GL_SOURCE0_RGB', - 'GL_SOURCE1_RGB', - 'GL_SOURCE2_RGB', - 'GL_SOURCE0_ALPHA', - 'GL_SOURCE1_ALPHA', - 'GL_SOURCE2_ALPHA', - 'GL_OPERAND0_RGB', - 'GL_OPERAND1_RGB', - 'GL_OPERAND2_RGB', - 'GL_OPERAND0_ALPHA', - 'GL_OPERAND1_ALPHA', - 'GL_OPERAND2_ALPHA', - 'GL_RGB_SCALE', - 'GL_ADD_SIGNED', - 'GL_INTERPOLATE', - 'GL_SUBTRACT', - 'GL_CONSTANT', - 'GL_PRIMARY_COLOR', - 'GL_PREVIOUS', - 'GL_DOT3_RGB', - 'GL_DOT3_RGBA', - 'GL_BLEND_DST_RGB', - 'GL_BLEND_SRC_RGB', - 'GL_BLEND_DST_ALPHA', - 'GL_BLEND_SRC_ALPHA', - 'GL_POINT_FADE_THRESHOLD_SIZE', - 'GL_DEPTH_COMPONENT16', - 'GL_DEPTH_COMPONENT24', - 'GL_DEPTH_COMPONENT32', - 'GL_MIRRORED_REPEAT', - 'GL_MAX_TEXTURE_LOD_BIAS', - 'GL_TEXTURE_LOD_BIAS', - 'GL_INCR_WRAP', - 'GL_DECR_WRAP', - 'GL_TEXTURE_DEPTH_SIZE', - 'GL_TEXTURE_COMPARE_MODE', - 'GL_TEXTURE_COMPARE_FUNC', - 'GL_POINT_SIZE_MIN', - 'GL_POINT_SIZE_MAX', - 'GL_POINT_DISTANCE_ATTENUATION', - 'GL_GENERATE_MIPMAP', - 'GL_GENERATE_MIPMAP_HINT', - 'GL_FOG_COORDINATE_SOURCE', - 'GL_FOG_COORDINATE', - 'GL_FRAGMENT_DEPTH', - 'GL_CURRENT_FOG_COORDINATE', - 'GL_FOG_COORDINATE_ARRAY_TYPE', - 'GL_FOG_COORDINATE_ARRAY_STRIDE', - 'GL_FOG_COORDINATE_ARRAY_POINTER', - 'GL_FOG_COORDINATE_ARRAY', - 'GL_COLOR_SUM', - 'GL_CURRENT_SECONDARY_COLOR', - 'GL_SECONDARY_COLOR_ARRAY_SIZE', - 'GL_SECONDARY_COLOR_ARRAY_TYPE', - 'GL_SECONDARY_COLOR_ARRAY_STRIDE', - 'GL_SECONDARY_COLOR_ARRAY_POINTER', - 'GL_SECONDARY_COLOR_ARRAY', - 'GL_TEXTURE_FILTER_CONTROL', - 'GL_DEPTH_TEXTURE_MODE', - 'GL_COMPARE_R_TO_TEXTURE', - 'GL_BLEND_COLOR', - 'GL_BLEND_EQUATION', - 'GL_CONSTANT_COLOR', - 'GL_ONE_MINUS_CONSTANT_COLOR', - 'GL_CONSTANT_ALPHA', - 'GL_ONE_MINUS_CONSTANT_ALPHA', - 'GL_FUNC_ADD', - 'GL_FUNC_REVERSE_SUBTRACT', - 'GL_FUNC_SUBTRACT', - 'GL_MIN', - 'GL_MAX', - 'GL_BUFFER_SIZE', - 'GL_BUFFER_USAGE', - 'GL_QUERY_COUNTER_BITS', - 'GL_CURRENT_QUERY', - 'GL_QUERY_RESULT', - 'GL_QUERY_RESULT_AVAILABLE', - 'GL_ARRAY_BUFFER', - 'GL_ELEMENT_ARRAY_BUFFER', - 'GL_ARRAY_BUFFER_BINDING', - 'GL_ELEMENT_ARRAY_BUFFER_BINDING', - 'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING', - 'GL_READ_ONLY', - 'GL_WRITE_ONLY', - 'GL_READ_WRITE', - 'GL_BUFFER_ACCESS', - 
'GL_BUFFER_MAPPED', - 'GL_BUFFER_MAP_POINTER', - 'GL_STREAM_DRAW', - 'GL_STREAM_READ', - 'GL_STREAM_COPY', - 'GL_STATIC_DRAW', - 'GL_STATIC_READ', - 'GL_STATIC_COPY', - 'GL_DYNAMIC_DRAW', - 'GL_DYNAMIC_READ', - 'GL_DYNAMIC_COPY', - 'GL_SAMPLES_PASSED', - 'GL_SRC1_ALPHA', - 'GL_VERTEX_ARRAY_BUFFER_BINDING', - 'GL_NORMAL_ARRAY_BUFFER_BINDING', - 'GL_COLOR_ARRAY_BUFFER_BINDING', - 'GL_INDEX_ARRAY_BUFFER_BINDING', - 'GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING', - 'GL_EDGE_FLAG_ARRAY_BUFFER_BINDING', - 'GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING', - 'GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING', - 'GL_WEIGHT_ARRAY_BUFFER_BINDING', - 'GL_FOG_COORD_SRC', - 'GL_FOG_COORD', - 'GL_CURRENT_FOG_COORD', - 'GL_FOG_COORD_ARRAY_TYPE', - 'GL_FOG_COORD_ARRAY_STRIDE', - 'GL_FOG_COORD_ARRAY_POINTER', - 'GL_FOG_COORD_ARRAY', - 'GL_FOG_COORD_ARRAY_BUFFER_BINDING', - 'GL_SRC0_RGB', - 'GL_SRC1_RGB', - 'GL_SRC2_RGB', - 'GL_SRC0_ALPHA', - 'GL_SRC2_ALPHA', - 'GL_BLEND_EQUATION_RGB', - 'GL_VERTEX_ATTRIB_ARRAY_ENABLED', - 'GL_VERTEX_ATTRIB_ARRAY_SIZE', - 'GL_VERTEX_ATTRIB_ARRAY_STRIDE', - 'GL_VERTEX_ATTRIB_ARRAY_TYPE', - 'GL_CURRENT_VERTEX_ATTRIB', - 'GL_VERTEX_PROGRAM_POINT_SIZE', - 'GL_VERTEX_ATTRIB_ARRAY_POINTER', - 'GL_STENCIL_BACK_FUNC', - 'GL_STENCIL_BACK_FAIL', - 'GL_STENCIL_BACK_PASS_DEPTH_FAIL', - 'GL_STENCIL_BACK_PASS_DEPTH_PASS', - 'GL_MAX_DRAW_BUFFERS', - 'GL_DRAW_BUFFER0', - 'GL_DRAW_BUFFER1', - 'GL_DRAW_BUFFER2', - 'GL_DRAW_BUFFER3', - 'GL_DRAW_BUFFER4', - 'GL_DRAW_BUFFER5', - 'GL_DRAW_BUFFER6', - 'GL_DRAW_BUFFER7', - 'GL_DRAW_BUFFER8', - 'GL_DRAW_BUFFER9', - 'GL_DRAW_BUFFER10', - 'GL_DRAW_BUFFER11', - 'GL_DRAW_BUFFER12', - 'GL_DRAW_BUFFER13', - 'GL_DRAW_BUFFER14', - 'GL_DRAW_BUFFER15', - 'GL_BLEND_EQUATION_ALPHA', - 'GL_MAX_VERTEX_ATTRIBS', - 'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED', - 'GL_MAX_TEXTURE_IMAGE_UNITS', - 'GL_FRAGMENT_SHADER', - 'GL_VERTEX_SHADER', - 'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS', - 'GL_MAX_VERTEX_UNIFORM_COMPONENTS', - 'GL_MAX_VARYING_FLOATS', - 'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS', - 'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS', - 'GL_SHADER_TYPE', - 'GL_FLOAT_VEC2', - 'GL_FLOAT_VEC3', - 'GL_FLOAT_VEC4', - 'GL_INT_VEC2', - 'GL_INT_VEC3', - 'GL_INT_VEC4', - 'GL_BOOL', - 'GL_BOOL_VEC2', - 'GL_BOOL_VEC3', - 'GL_BOOL_VEC4', - 'GL_FLOAT_MAT2', - 'GL_FLOAT_MAT3', - 'GL_FLOAT_MAT4', - 'GL_SAMPLER_1D', - 'GL_SAMPLER_2D', - 'GL_SAMPLER_3D', - 'GL_SAMPLER_CUBE', - 'GL_SAMPLER_1D_SHADOW', - 'GL_SAMPLER_2D_SHADOW', - 'GL_DELETE_STATUS', - 'GL_COMPILE_STATUS', - 'GL_LINK_STATUS', - 'GL_VALIDATE_STATUS', - 'GL_INFO_LOG_LENGTH', - 'GL_ATTACHED_SHADERS', - 'GL_ACTIVE_UNIFORMS', - 'GL_ACTIVE_UNIFORM_MAX_LENGTH', - 'GL_SHADER_SOURCE_LENGTH', - 'GL_ACTIVE_ATTRIBUTES', - 'GL_ACTIVE_ATTRIBUTE_MAX_LENGTH', - 'GL_FRAGMENT_SHADER_DERIVATIVE_HINT', - 'GL_SHADING_LANGUAGE_VERSION', - 'GL_CURRENT_PROGRAM', - 'GL_POINT_SPRITE_COORD_ORIGIN', - 'GL_LOWER_LEFT', - 'GL_UPPER_LEFT', - 'GL_STENCIL_BACK_REF', - 'GL_STENCIL_BACK_VALUE_MASK', - 'GL_STENCIL_BACK_WRITEMASK', - 'GL_VERTEX_PROGRAM_TWO_SIDE', - 'GL_POINT_SPRITE', - 'GL_COORD_REPLACE', - 'GL_MAX_TEXTURE_COORDS', - 'GL_PIXEL_PACK_BUFFER', - 'GL_PIXEL_UNPACK_BUFFER', - 'GL_PIXEL_PACK_BUFFER_BINDING', - 'GL_PIXEL_UNPACK_BUFFER_BINDING', - 'GL_FLOAT_MAT2x3', - 'GL_FLOAT_MAT2x4', - 'GL_FLOAT_MAT3x2', - 'GL_FLOAT_MAT3x4', - 'GL_FLOAT_MAT4x2', - 'GL_FLOAT_MAT4x3', - 'GL_SRGB', - 'GL_SRGB8', - 'GL_SRGB_ALPHA', - 'GL_SRGB8_ALPHA8', - 'GL_COMPRESSED_SRGB', - 'GL_COMPRESSED_SRGB_ALPHA', - 'GL_CURRENT_RASTER_SECONDARY_COLOR', - 'GL_SLUMINANCE_ALPHA', - 'GL_SLUMINANCE8_ALPHA8', - 'GL_SLUMINANCE', 
- 'GL_SLUMINANCE8', - 'GL_COMPRESSED_SLUMINANCE', - 'GL_COMPRESSED_SLUMINANCE_ALPHA', - 'GL_COMPARE_REF_TO_TEXTURE', - 'GL_CLIP_DISTANCE0', - 'GL_CLIP_DISTANCE1', - 'GL_CLIP_DISTANCE2', - 'GL_CLIP_DISTANCE3', - 'GL_CLIP_DISTANCE4', - 'GL_CLIP_DISTANCE5', - 'GL_CLIP_DISTANCE6', - 'GL_CLIP_DISTANCE7', - 'GL_MAX_CLIP_DISTANCES', - 'GL_MAJOR_VERSION', - 'GL_MINOR_VERSION', - 'GL_NUM_EXTENSIONS', - 'GL_CONTEXT_FLAGS', - 'GL_COMPRESSED_RED', - 'GL_COMPRESSED_RG', - 'GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT', - 'GL_RGBA32F', - 'GL_RGB32F', - 'GL_RGBA16F', - 'GL_RGB16F', - 'GL_VERTEX_ATTRIB_ARRAY_INTEGER', - 'GL_MAX_ARRAY_TEXTURE_LAYERS', - 'GL_MIN_PROGRAM_TEXEL_OFFSET', - 'GL_MAX_PROGRAM_TEXEL_OFFSET', - 'GL_CLAMP_READ_COLOR', - 'GL_FIXED_ONLY', - 'GL_MAX_VARYING_COMPONENTS', - 'GL_TEXTURE_1D_ARRAY', - 'GL_PROXY_TEXTURE_1D_ARRAY', - 'GL_TEXTURE_2D_ARRAY', - 'GL_PROXY_TEXTURE_2D_ARRAY', - 'GL_TEXTURE_BINDING_1D_ARRAY', - 'GL_TEXTURE_BINDING_2D_ARRAY', - 'GL_R11F_G11F_B10F', - 'GL_UNSIGNED_INT_10F_11F_11F_REV', - 'GL_RGB9_E5', - 'GL_UNSIGNED_INT_5_9_9_9_REV', - 'GL_TEXTURE_SHARED_SIZE', - 'GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH', - 'GL_TRANSFORM_FEEDBACK_BUFFER_MODE', - 'GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS', - 'GL_TRANSFORM_FEEDBACK_VARYINGS', - 'GL_TRANSFORM_FEEDBACK_BUFFER_START', - 'GL_TRANSFORM_FEEDBACK_BUFFER_SIZE', - 'GL_PRIMITIVES_GENERATED', - 'GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN', - 'GL_RASTERIZER_DISCARD', - 'GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS', - 'GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS', - 'GL_INTERLEAVED_ATTRIBS', - 'GL_SEPARATE_ATTRIBS', - 'GL_TRANSFORM_FEEDBACK_BUFFER', - 'GL_TRANSFORM_FEEDBACK_BUFFER_BINDING', - 'GL_RGBA32UI', - 'GL_RGB32UI', - 'GL_RGBA16UI', - 'GL_RGB16UI', - 'GL_RGBA8UI', - 'GL_RGB8UI', - 'GL_RGBA32I', - 'GL_RGB32I', - 'GL_RGBA16I', - 'GL_RGB16I', - 'GL_RGBA8I', - 'GL_RGB8I', - 'GL_RED_INTEGER', - 'GL_GREEN_INTEGER', - 'GL_BLUE_INTEGER', - 'GL_RGB_INTEGER', - 'GL_RGBA_INTEGER', - 'GL_BGR_INTEGER', - 'GL_BGRA_INTEGER', - 'GL_SAMPLER_1D_ARRAY', - 'GL_SAMPLER_2D_ARRAY', - 'GL_SAMPLER_1D_ARRAY_SHADOW', - 'GL_SAMPLER_2D_ARRAY_SHADOW', - 'GL_SAMPLER_CUBE_SHADOW', - 'GL_UNSIGNED_INT_VEC2', - 'GL_UNSIGNED_INT_VEC3', - 'GL_UNSIGNED_INT_VEC4', - 'GL_INT_SAMPLER_1D', - 'GL_INT_SAMPLER_2D', - 'GL_INT_SAMPLER_3D', - 'GL_INT_SAMPLER_CUBE', - 'GL_INT_SAMPLER_1D_ARRAY', - 'GL_INT_SAMPLER_2D_ARRAY', - 'GL_UNSIGNED_INT_SAMPLER_1D', - 'GL_UNSIGNED_INT_SAMPLER_2D', - 'GL_UNSIGNED_INT_SAMPLER_3D', - 'GL_UNSIGNED_INT_SAMPLER_CUBE', - 'GL_UNSIGNED_INT_SAMPLER_1D_ARRAY', - 'GL_UNSIGNED_INT_SAMPLER_2D_ARRAY', - 'GL_QUERY_WAIT', - 'GL_QUERY_NO_WAIT', - 'GL_QUERY_BY_REGION_WAIT', - 'GL_QUERY_BY_REGION_NO_WAIT', - 'GL_BUFFER_ACCESS_FLAGS', - 'GL_BUFFER_MAP_LENGTH', - 'GL_BUFFER_MAP_OFFSET', - 'GL_DEPTH_COMPONENT32F', - 'GL_DEPTH32F_STENCIL8', - 'GL_FLOAT_32_UNSIGNED_INT_24_8_REV', - 'GL_INVALID_FRAMEBUFFER_OPERATION', - 'GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING', - 'GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE', - 'GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE', - 'GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE', - 'GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE', - 'GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE', - 'GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE', - 'GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE', - 'GL_FRAMEBUFFER_DEFAULT', - 'GL_FRAMEBUFFER_UNDEFINED', - 'GL_DEPTH_STENCIL_ATTACHMENT', - 'GL_MAX_RENDERBUFFER_SIZE', - 'GL_DEPTH_STENCIL', - 'GL_UNSIGNED_INT_24_8', - 'GL_DEPTH24_STENCIL8', - 'GL_TEXTURE_STENCIL_SIZE', - 'GL_TEXTURE_RED_TYPE', - 'GL_TEXTURE_GREEN_TYPE', - 'GL_TEXTURE_BLUE_TYPE', - 
'GL_TEXTURE_ALPHA_TYPE', - 'GL_TEXTURE_DEPTH_TYPE', - 'GL_UNSIGNED_NORMALIZED', - 'GL_FRAMEBUFFER_BINDING', - 'GL_DRAW_FRAMEBUFFER_BINDING', - 'GL_RENDERBUFFER_BINDING', - 'GL_READ_FRAMEBUFFER', - 'GL_DRAW_FRAMEBUFFER', - 'GL_READ_FRAMEBUFFER_BINDING', - 'GL_RENDERBUFFER_SAMPLES', - 'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE', - 'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME', - 'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL', - 'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE', - 'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER', - 'GL_FRAMEBUFFER_COMPLETE', - 'GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT', - 'GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT', - 'GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER', - 'GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER', - 'GL_FRAMEBUFFER_UNSUPPORTED', - 'GL_MAX_COLOR_ATTACHMENTS', - 'GL_COLOR_ATTACHMENT0', - 'GL_COLOR_ATTACHMENT1', - 'GL_COLOR_ATTACHMENT2', - 'GL_COLOR_ATTACHMENT3', - 'GL_COLOR_ATTACHMENT4', - 'GL_COLOR_ATTACHMENT5', - 'GL_COLOR_ATTACHMENT6', - 'GL_COLOR_ATTACHMENT7', - 'GL_COLOR_ATTACHMENT8', - 'GL_COLOR_ATTACHMENT9', - 'GL_COLOR_ATTACHMENT10', - 'GL_COLOR_ATTACHMENT11', - 'GL_COLOR_ATTACHMENT12', - 'GL_COLOR_ATTACHMENT13', - 'GL_COLOR_ATTACHMENT14', - 'GL_COLOR_ATTACHMENT15', - 'GL_COLOR_ATTACHMENT16', - 'GL_COLOR_ATTACHMENT17', - 'GL_COLOR_ATTACHMENT18', - 'GL_COLOR_ATTACHMENT19', - 'GL_COLOR_ATTACHMENT20', - 'GL_COLOR_ATTACHMENT21', - 'GL_COLOR_ATTACHMENT22', - 'GL_COLOR_ATTACHMENT23', - 'GL_COLOR_ATTACHMENT24', - 'GL_COLOR_ATTACHMENT25', - 'GL_COLOR_ATTACHMENT26', - 'GL_COLOR_ATTACHMENT27', - 'GL_COLOR_ATTACHMENT28', - 'GL_COLOR_ATTACHMENT29', - 'GL_COLOR_ATTACHMENT30', - 'GL_COLOR_ATTACHMENT31', - 'GL_DEPTH_ATTACHMENT', - 'GL_STENCIL_ATTACHMENT', - 'GL_FRAMEBUFFER', - 'GL_RENDERBUFFER', - 'GL_RENDERBUFFER_WIDTH', - 'GL_RENDERBUFFER_HEIGHT', - 'GL_RENDERBUFFER_INTERNAL_FORMAT', - 'GL_STENCIL_INDEX1', - 'GL_STENCIL_INDEX4', - 'GL_STENCIL_INDEX8', - 'GL_STENCIL_INDEX16', - 'GL_RENDERBUFFER_RED_SIZE', - 'GL_RENDERBUFFER_GREEN_SIZE', - 'GL_RENDERBUFFER_BLUE_SIZE', - 'GL_RENDERBUFFER_ALPHA_SIZE', - 'GL_RENDERBUFFER_DEPTH_SIZE', - 'GL_RENDERBUFFER_STENCIL_SIZE', - 'GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE', - 'GL_MAX_SAMPLES', - 'GL_INDEX', - 'GL_TEXTURE_LUMINANCE_TYPE', - 'GL_TEXTURE_INTENSITY_TYPE', - 'GL_FRAMEBUFFER_SRGB', - 'GL_HALF_FLOAT', - 'GL_MAP_READ_BIT', - 'GL_MAP_WRITE_BIT', - 'GL_MAP_INVALIDATE_RANGE_BIT', - 'GL_MAP_INVALIDATE_BUFFER_BIT', - 'GL_MAP_FLUSH_EXPLICIT_BIT', - 'GL_MAP_UNSYNCHRONIZED_BIT', - 'GL_COMPRESSED_RED_RGTC1', - 'GL_COMPRESSED_SIGNED_RED_RGTC1', - 'GL_COMPRESSED_RG_RGTC2', - 'GL_COMPRESSED_SIGNED_RG_RGTC2', - 'GL_RG', - 'GL_RG_INTEGER', - 'GL_R8', - 'GL_R16', - 'GL_RG8', - 'GL_RG16', - 'GL_R16F', - 'GL_R32F', - 'GL_RG16F', - 'GL_RG32F', - 'GL_R8I', - 'GL_R8UI', - 'GL_R16I', - 'GL_R16UI', - 'GL_R32I', - 'GL_R32UI', - 'GL_RG8I', - 'GL_RG8UI', - 'GL_RG16I', - 'GL_RG16UI', - 'GL_RG32I', - 'GL_RG32UI', - 'GL_VERTEX_ARRAY_BINDING', - 'GL_CLAMP_VERTEX_COLOR', - 'GL_CLAMP_FRAGMENT_COLOR', - 'GL_ALPHA_INTEGER', - 'GL_SAMPLER_2D_RECT', - 'GL_SAMPLER_2D_RECT_SHADOW', - 'GL_SAMPLER_BUFFER', - 'GL_INT_SAMPLER_2D_RECT', - 'GL_INT_SAMPLER_BUFFER', - 'GL_UNSIGNED_INT_SAMPLER_2D_RECT', - 'GL_UNSIGNED_INT_SAMPLER_BUFFER', - 'GL_TEXTURE_BUFFER', - 'GL_MAX_TEXTURE_BUFFER_SIZE', - 'GL_TEXTURE_BINDING_BUFFER', - 'GL_TEXTURE_BUFFER_DATA_STORE_BINDING', - 'GL_TEXTURE_RECTANGLE', - 'GL_TEXTURE_BINDING_RECTANGLE', - 'GL_PROXY_TEXTURE_RECTANGLE', - 'GL_MAX_RECTANGLE_TEXTURE_SIZE', - 'GL_R8_SNORM', - 'GL_RG8_SNORM', - 'GL_RGB8_SNORM', - 'GL_RGBA8_SNORM', - 'GL_R16_SNORM', - 
'GL_RG16_SNORM', - 'GL_RGB16_SNORM', - 'GL_RGBA16_SNORM', - 'GL_SIGNED_NORMALIZED', - 'GL_PRIMITIVE_RESTART', - 'GL_PRIMITIVE_RESTART_INDEX', - 'GL_COPY_READ_BUFFER', - 'GL_COPY_WRITE_BUFFER', - 'GL_UNIFORM_BUFFER', - 'GL_UNIFORM_BUFFER_BINDING', - 'GL_UNIFORM_BUFFER_START', - 'GL_UNIFORM_BUFFER_SIZE', - 'GL_MAX_VERTEX_UNIFORM_BLOCKS', - 'GL_MAX_GEOMETRY_UNIFORM_BLOCKS', - 'GL_MAX_FRAGMENT_UNIFORM_BLOCKS', - 'GL_MAX_COMBINED_UNIFORM_BLOCKS', - 'GL_MAX_UNIFORM_BUFFER_BINDINGS', - 'GL_MAX_UNIFORM_BLOCK_SIZE', - 'GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS', - 'GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS', - 'GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS', - 'GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT', - 'GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH', - 'GL_ACTIVE_UNIFORM_BLOCKS', - 'GL_UNIFORM_TYPE', - 'GL_UNIFORM_SIZE', - 'GL_UNIFORM_NAME_LENGTH', - 'GL_UNIFORM_BLOCK_INDEX', - 'GL_UNIFORM_OFFSET', - 'GL_UNIFORM_ARRAY_STRIDE', - 'GL_UNIFORM_MATRIX_STRIDE', - 'GL_UNIFORM_IS_ROW_MAJOR', - 'GL_UNIFORM_BLOCK_BINDING', - 'GL_UNIFORM_BLOCK_DATA_SIZE', - 'GL_UNIFORM_BLOCK_NAME_LENGTH', - 'GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS', - 'GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES', - 'GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER', - 'GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER', - 'GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER', - 'GL_INVALID_INDEX', - 'GL_CONTEXT_CORE_PROFILE_BIT', - 'GL_CONTEXT_COMPATIBILITY_PROFILE_BIT', - 'GL_LINES_ADJACENCY', - 'GL_LINE_STRIP_ADJACENCY', - 'GL_TRIANGLES_ADJACENCY', - 'GL_TRIANGLE_STRIP_ADJACENCY', - 'GL_PROGRAM_POINT_SIZE', - 'GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS', - 'GL_FRAMEBUFFER_ATTACHMENT_LAYERED', - 'GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS', - 'GL_GEOMETRY_SHADER', - 'GL_GEOMETRY_VERTICES_OUT', - 'GL_GEOMETRY_INPUT_TYPE', - 'GL_GEOMETRY_OUTPUT_TYPE', - 'GL_MAX_GEOMETRY_UNIFORM_COMPONENTS', - 'GL_MAX_GEOMETRY_OUTPUT_VERTICES', - 'GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS', - 'GL_MAX_VERTEX_OUTPUT_COMPONENTS', - 'GL_MAX_GEOMETRY_INPUT_COMPONENTS', - 'GL_MAX_GEOMETRY_OUTPUT_COMPONENTS', - 'GL_MAX_FRAGMENT_INPUT_COMPONENTS', - 'GL_CONTEXT_PROFILE_MASK', - 'GL_DEPTH_CLAMP', - 'GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION', - 'GL_FIRST_VERTEX_CONVENTION', - 'GL_LAST_VERTEX_CONVENTION', - 'GL_PROVOKING_VERTEX', - 'GL_TEXTURE_CUBE_MAP_SEAMLESS', - 'GL_MAX_SERVER_WAIT_TIMEOUT', - 'GL_OBJECT_TYPE', - 'GL_SYNC_CONDITION', - 'GL_SYNC_STATUS', - 'GL_SYNC_FLAGS', - 'GL_SYNC_FENCE', - 'GL_SYNC_GPU_COMMANDS_COMPLETE', - 'GL_UNSIGNALED', - 'GL_SIGNALED', - 'GL_ALREADY_SIGNALED', - 'GL_TIMEOUT_EXPIRED', - 'GL_CONDITION_SATISFIED', - 'GL_WAIT_FAILED', - 'GL_TIMEOUT_IGNORED', - 'GL_SYNC_FLUSH_COMMANDS_BIT', - 'GL_SAMPLE_POSITION', - 'GL_SAMPLE_MASK', - 'GL_SAMPLE_MASK_VALUE', - 'GL_MAX_SAMPLE_MASK_WORDS', - 'GL_TEXTURE_2D_MULTISAMPLE', - 'GL_PROXY_TEXTURE_2D_MULTISAMPLE', - 'GL_TEXTURE_2D_MULTISAMPLE_ARRAY', - 'GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY', - 'GL_TEXTURE_BINDING_2D_MULTISAMPLE', - 'GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY', - 'GL_TEXTURE_SAMPLES', - 'GL_TEXTURE_FIXED_SAMPLE_LOCATIONS', - 'GL_SAMPLER_2D_MULTISAMPLE', - 'GL_INT_SAMPLER_2D_MULTISAMPLE', - 'GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE', - 'GL_SAMPLER_2D_MULTISAMPLE_ARRAY', - 'GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY', - 'GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY', - 'GL_MAX_COLOR_TEXTURE_SAMPLES', - 'GL_MAX_DEPTH_TEXTURE_SAMPLES', - 'GL_MAX_INTEGER_SAMPLES', - 'GL_VERTEX_ATTRIB_ARRAY_DIVISOR', - 'GL_SRC1_COLOR', - 'GL_ONE_MINUS_SRC1_COLOR', - 'GL_ONE_MINUS_SRC1_ALPHA', - 'GL_MAX_DUAL_SOURCE_DRAW_BUFFERS', - 'GL_ANY_SAMPLES_PASSED', - 
'GL_SAMPLER_BINDING', - 'GL_RGB10_A2UI', - 'GL_TEXTURE_SWIZZLE_R', - 'GL_TEXTURE_SWIZZLE_G', - 'GL_TEXTURE_SWIZZLE_B', - 'GL_TEXTURE_SWIZZLE_A', - 'GL_TEXTURE_SWIZZLE_RGBA', - 'GL_TIME_ELAPSED', - 'GL_TIMESTAMP', - 'GL_INT_2_10_10_10_REV', - 'GL_SAMPLE_SHADING', - 'GL_MIN_SAMPLE_SHADING_VALUE', - 'GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET', - 'GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET', - 'GL_TEXTURE_CUBE_MAP_ARRAY', - 'GL_TEXTURE_BINDING_CUBE_MAP_ARRAY', - 'GL_PROXY_TEXTURE_CUBE_MAP_ARRAY', - 'GL_SAMPLER_CUBE_MAP_ARRAY', - 'GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW', - 'GL_INT_SAMPLER_CUBE_MAP_ARRAY', - 'GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY', - 'GL_DRAW_INDIRECT_BUFFER', - 'GL_DRAW_INDIRECT_BUFFER_BINDING', - 'GL_GEOMETRY_SHADER_INVOCATIONS', - 'GL_MAX_GEOMETRY_SHADER_INVOCATIONS', - 'GL_MIN_FRAGMENT_INTERPOLATION_OFFSET', - 'GL_MAX_FRAGMENT_INTERPOLATION_OFFSET', - 'GL_FRAGMENT_INTERPOLATION_OFFSET_BITS', - 'GL_MAX_VERTEX_STREAMS', - 'GL_DOUBLE_VEC2', - 'GL_DOUBLE_VEC3', - 'GL_DOUBLE_VEC4', - 'GL_DOUBLE_MAT2', - 'GL_DOUBLE_MAT3', - 'GL_DOUBLE_MAT4', - 'GL_DOUBLE_MAT2x3', - 'GL_DOUBLE_MAT2x4', - 'GL_DOUBLE_MAT3x2', - 'GL_DOUBLE_MAT3x4', - 'GL_DOUBLE_MAT4x2', - 'GL_DOUBLE_MAT4x3', - 'GL_ACTIVE_SUBROUTINES', - 'GL_ACTIVE_SUBROUTINE_UNIFORMS', - 'GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS', - 'GL_ACTIVE_SUBROUTINE_MAX_LENGTH', - 'GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH', - 'GL_MAX_SUBROUTINES', - 'GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS', - 'GL_NUM_COMPATIBLE_SUBROUTINES', - 'GL_COMPATIBLE_SUBROUTINES', - 'GL_PATCHES', - 'GL_PATCH_VERTICES', - 'GL_PATCH_DEFAULT_INNER_LEVEL', - 'GL_PATCH_DEFAULT_OUTER_LEVEL', - 'GL_TESS_CONTROL_OUTPUT_VERTICES', - 'GL_TESS_GEN_MODE', - 'GL_TESS_GEN_SPACING', - 'GL_TESS_GEN_VERTEX_ORDER', - 'GL_TESS_GEN_POINT_MODE', - 'GL_ISOLINES', - 'GL_FRACTIONAL_ODD', - 'GL_FRACTIONAL_EVEN', - 'GL_MAX_PATCH_VERTICES', - 'GL_MAX_TESS_GEN_LEVEL', - 'GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS', - 'GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS', - 'GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS', - 'GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS', - 'GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS', - 'GL_MAX_TESS_PATCH_COMPONENTS', - 'GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS', - 'GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS', - 'GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS', - 'GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS', - 'GL_MAX_TESS_CONTROL_INPUT_COMPONENTS', - 'GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS', - 'GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS', - 'GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS', - 'GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER', - 'GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER', - 'GL_TESS_EVALUATION_SHADER', - 'GL_TESS_CONTROL_SHADER', - 'GL_TRANSFORM_FEEDBACK', - 'GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED', - 'GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE', - 'GL_TRANSFORM_FEEDBACK_BINDING', - 'GL_MAX_TRANSFORM_FEEDBACK_BUFFERS', - 'GL_FIXED', - 'GL_IMPLEMENTATION_COLOR_READ_TYPE', - 'GL_IMPLEMENTATION_COLOR_READ_FORMAT', - 'GL_LOW_FLOAT', - 'GL_MEDIUM_FLOAT', - 'GL_HIGH_FLOAT', - 'GL_LOW_INT', - 'GL_MEDIUM_INT', - 'GL_HIGH_INT', - 'GL_SHADER_COMPILER', - 'GL_SHADER_BINARY_FORMATS', - 'GL_NUM_SHADER_BINARY_FORMATS', - 'GL_MAX_VERTEX_UNIFORM_VECTORS', - 'GL_MAX_VARYING_VECTORS', - 'GL_MAX_FRAGMENT_UNIFORM_VECTORS', - 'GL_RGB565', - 'GL_PROGRAM_BINARY_RETRIEVABLE_HINT', - 'GL_PROGRAM_BINARY_LENGTH', - 'GL_NUM_PROGRAM_BINARY_FORMATS', - 'GL_PROGRAM_BINARY_FORMATS', - 'GL_VERTEX_SHADER_BIT', - 'GL_FRAGMENT_SHADER_BIT', - 'GL_GEOMETRY_SHADER_BIT', - 'GL_TESS_CONTROL_SHADER_BIT', - 'GL_TESS_EVALUATION_SHADER_BIT', - 
'GL_ALL_SHADER_BITS', - 'GL_PROGRAM_SEPARABLE', - 'GL_ACTIVE_PROGRAM', - 'GL_PROGRAM_PIPELINE_BINDING', - 'GL_MAX_VIEWPORTS', - 'GL_VIEWPORT_SUBPIXEL_BITS', - 'GL_VIEWPORT_BOUNDS_RANGE', - 'GL_LAYER_PROVOKING_VERTEX', - 'GL_VIEWPORT_INDEX_PROVOKING_VERTEX', - 'GL_UNDEFINED_VERTEX', - 'GL_COPY_READ_BUFFER_BINDING', - 'GL_COPY_WRITE_BUFFER_BINDING', - 'GL_TRANSFORM_FEEDBACK_ACTIVE', - 'GL_TRANSFORM_FEEDBACK_PAUSED', - 'GL_UNPACK_COMPRESSED_BLOCK_WIDTH', - 'GL_UNPACK_COMPRESSED_BLOCK_HEIGHT', - 'GL_UNPACK_COMPRESSED_BLOCK_DEPTH', - 'GL_UNPACK_COMPRESSED_BLOCK_SIZE', - 'GL_PACK_COMPRESSED_BLOCK_WIDTH', - 'GL_PACK_COMPRESSED_BLOCK_HEIGHT', - 'GL_PACK_COMPRESSED_BLOCK_DEPTH', - 'GL_PACK_COMPRESSED_BLOCK_SIZE', - 'GL_NUM_SAMPLE_COUNTS', - 'GL_MIN_MAP_BUFFER_ALIGNMENT', - 'GL_ATOMIC_COUNTER_BUFFER', - 'GL_ATOMIC_COUNTER_BUFFER_BINDING', - 'GL_ATOMIC_COUNTER_BUFFER_START', - 'GL_ATOMIC_COUNTER_BUFFER_SIZE', - 'GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE', - 'GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS', - 'GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES', - 'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER', - 'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER', - 'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER', - 'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER', - 'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER', - 'GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS', - 'GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS', - 'GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS', - 'GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS', - 'GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS', - 'GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS', - 'GL_MAX_VERTEX_ATOMIC_COUNTERS', - 'GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS', - 'GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS', - 'GL_MAX_GEOMETRY_ATOMIC_COUNTERS', - 'GL_MAX_FRAGMENT_ATOMIC_COUNTERS', - 'GL_MAX_COMBINED_ATOMIC_COUNTERS', - 'GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE', - 'GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS', - 'GL_ACTIVE_ATOMIC_COUNTER_BUFFERS', - 'GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX', - 'GL_UNSIGNED_INT_ATOMIC_COUNTER', - 'GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT', - 'GL_ELEMENT_ARRAY_BARRIER_BIT', - 'GL_UNIFORM_BARRIER_BIT', - 'GL_TEXTURE_FETCH_BARRIER_BIT', - 'GL_SHADER_IMAGE_ACCESS_BARRIER_BIT', - 'GL_COMMAND_BARRIER_BIT', - 'GL_PIXEL_BUFFER_BARRIER_BIT', - 'GL_TEXTURE_UPDATE_BARRIER_BIT', - 'GL_BUFFER_UPDATE_BARRIER_BIT', - 'GL_FRAMEBUFFER_BARRIER_BIT', - 'GL_TRANSFORM_FEEDBACK_BARRIER_BIT', - 'GL_ATOMIC_COUNTER_BARRIER_BIT', - 'GL_ALL_BARRIER_BITS', - 'GL_MAX_IMAGE_UNITS', - 'GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS', - 'GL_IMAGE_BINDING_NAME', - 'GL_IMAGE_BINDING_LEVEL', - 'GL_IMAGE_BINDING_LAYERED', - 'GL_IMAGE_BINDING_LAYER', - 'GL_IMAGE_BINDING_ACCESS', - 'GL_IMAGE_1D', - 'GL_IMAGE_2D', - 'GL_IMAGE_3D', - 'GL_IMAGE_2D_RECT', - 'GL_IMAGE_CUBE', - 'GL_IMAGE_BUFFER', - 'GL_IMAGE_1D_ARRAY', - 'GL_IMAGE_2D_ARRAY', - 'GL_IMAGE_CUBE_MAP_ARRAY', - 'GL_IMAGE_2D_MULTISAMPLE', - 'GL_IMAGE_2D_MULTISAMPLE_ARRAY', - 'GL_INT_IMAGE_1D', - 'GL_INT_IMAGE_2D', - 'GL_INT_IMAGE_3D', - 'GL_INT_IMAGE_2D_RECT', - 'GL_INT_IMAGE_CUBE', - 'GL_INT_IMAGE_BUFFER', - 'GL_INT_IMAGE_1D_ARRAY', - 'GL_INT_IMAGE_2D_ARRAY', - 'GL_INT_IMAGE_CUBE_MAP_ARRAY', - 'GL_INT_IMAGE_2D_MULTISAMPLE', - 'GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY', - 'GL_UNSIGNED_INT_IMAGE_1D', - 'GL_UNSIGNED_INT_IMAGE_2D', - 'GL_UNSIGNED_INT_IMAGE_3D', - 'GL_UNSIGNED_INT_IMAGE_2D_RECT', - 'GL_UNSIGNED_INT_IMAGE_CUBE', - 'GL_UNSIGNED_INT_IMAGE_BUFFER', - 'GL_UNSIGNED_INT_IMAGE_1D_ARRAY', - 'GL_UNSIGNED_INT_IMAGE_2D_ARRAY', - 
'GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY', - 'GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE', - 'GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY', - 'GL_MAX_IMAGE_SAMPLES', - 'GL_IMAGE_BINDING_FORMAT', - 'GL_IMAGE_FORMAT_COMPATIBILITY_TYPE', - 'GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE', - 'GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS', - 'GL_MAX_VERTEX_IMAGE_UNIFORMS', - 'GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS', - 'GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS', - 'GL_MAX_GEOMETRY_IMAGE_UNIFORMS', - 'GL_MAX_FRAGMENT_IMAGE_UNIFORMS', - 'GL_MAX_COMBINED_IMAGE_UNIFORMS', - 'GL_COMPRESSED_RGBA_BPTC_UNORM', - 'GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM', - 'GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT', - 'GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT', - 'GL_TEXTURE_IMMUTABLE_FORMAT', - 'GL_NUM_SHADING_LANGUAGE_VERSIONS', - 'GL_VERTEX_ATTRIB_ARRAY_LONG', - 'GL_COMPRESSED_RGB8_ETC2', - 'GL_COMPRESSED_SRGB8_ETC2', - 'GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2', - 'GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2', - 'GL_COMPRESSED_RGBA8_ETC2_EAC', - 'GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC', - 'GL_COMPRESSED_R11_EAC', - 'GL_COMPRESSED_SIGNED_R11_EAC', - 'GL_COMPRESSED_RG11_EAC', - 'GL_COMPRESSED_SIGNED_RG11_EAC', - 'GL_PRIMITIVE_RESTART_FIXED_INDEX', - 'GL_ANY_SAMPLES_PASSED_CONSERVATIVE', - 'GL_MAX_ELEMENT_INDEX', - 'GL_COMPUTE_SHADER', - 'GL_MAX_COMPUTE_UNIFORM_BLOCKS', - 'GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS', - 'GL_MAX_COMPUTE_IMAGE_UNIFORMS', - 'GL_MAX_COMPUTE_SHARED_MEMORY_SIZE', - 'GL_MAX_COMPUTE_UNIFORM_COMPONENTS', - 'GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS', - 'GL_MAX_COMPUTE_ATOMIC_COUNTERS', - 'GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS', - 'GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS', - 'GL_MAX_COMPUTE_WORK_GROUP_COUNT', - 'GL_MAX_COMPUTE_WORK_GROUP_SIZE', - 'GL_COMPUTE_WORK_GROUP_SIZE', - 'GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER', - 'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER', - 'GL_DISPATCH_INDIRECT_BUFFER', - 'GL_DISPATCH_INDIRECT_BUFFER_BINDING', - 'GL_COMPUTE_SHADER_BIT', - 'GL_DEBUG_OUTPUT_SYNCHRONOUS', - 'GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH', - 'GL_DEBUG_CALLBACK_FUNCTION', - 'GL_DEBUG_CALLBACK_USER_PARAM', - 'GL_DEBUG_SOURCE_API', - 'GL_DEBUG_SOURCE_WINDOW_SYSTEM', - 'GL_DEBUG_SOURCE_SHADER_COMPILER', - 'GL_DEBUG_SOURCE_THIRD_PARTY', - 'GL_DEBUG_SOURCE_APPLICATION', - 'GL_DEBUG_SOURCE_OTHER', - 'GL_DEBUG_TYPE_ERROR', - 'GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR', - 'GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR', - 'GL_DEBUG_TYPE_PORTABILITY', - 'GL_DEBUG_TYPE_PERFORMANCE', - 'GL_DEBUG_TYPE_OTHER', - 'GL_MAX_DEBUG_MESSAGE_LENGTH', - 'GL_MAX_DEBUG_LOGGED_MESSAGES', - 'GL_DEBUG_LOGGED_MESSAGES', - 'GL_DEBUG_SEVERITY_HIGH', - 'GL_DEBUG_SEVERITY_MEDIUM', - 'GL_DEBUG_SEVERITY_LOW', - 'GL_DEBUG_TYPE_MARKER', - 'GL_DEBUG_TYPE_PUSH_GROUP', - 'GL_DEBUG_TYPE_POP_GROUP', - 'GL_DEBUG_SEVERITY_NOTIFICATION', - 'GL_MAX_DEBUG_GROUP_STACK_DEPTH', - 'GL_DEBUG_GROUP_STACK_DEPTH', - 'GL_BUFFER', - 'GL_SHADER', - 'GL_PROGRAM', - 'GL_QUERY', - 'GL_PROGRAM_PIPELINE', - 'GL_SAMPLER', - 'GL_MAX_LABEL_LENGTH', - 'GL_DEBUG_OUTPUT', - 'GL_CONTEXT_FLAG_DEBUG_BIT', - 'GL_MAX_UNIFORM_LOCATIONS', - 'GL_FRAMEBUFFER_DEFAULT_WIDTH', - 'GL_FRAMEBUFFER_DEFAULT_HEIGHT', - 'GL_FRAMEBUFFER_DEFAULT_LAYERS', - 'GL_FRAMEBUFFER_DEFAULT_SAMPLES', - 'GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS', - 'GL_MAX_FRAMEBUFFER_WIDTH', - 'GL_MAX_FRAMEBUFFER_HEIGHT', - 'GL_MAX_FRAMEBUFFER_LAYERS', - 'GL_MAX_FRAMEBUFFER_SAMPLES', - 'GL_INTERNALFORMAT_SUPPORTED', - 'GL_INTERNALFORMAT_PREFERRED', - 'GL_INTERNALFORMAT_RED_SIZE', - 'GL_INTERNALFORMAT_GREEN_SIZE', - 'GL_INTERNALFORMAT_BLUE_SIZE', - 
'GL_INTERNALFORMAT_ALPHA_SIZE', - 'GL_INTERNALFORMAT_DEPTH_SIZE', - 'GL_INTERNALFORMAT_STENCIL_SIZE', - 'GL_INTERNALFORMAT_SHARED_SIZE', - 'GL_INTERNALFORMAT_RED_TYPE', - 'GL_INTERNALFORMAT_GREEN_TYPE', - 'GL_INTERNALFORMAT_BLUE_TYPE', - 'GL_INTERNALFORMAT_ALPHA_TYPE', - 'GL_INTERNALFORMAT_DEPTH_TYPE', - 'GL_INTERNALFORMAT_STENCIL_TYPE', - 'GL_MAX_WIDTH', - 'GL_MAX_HEIGHT', - 'GL_MAX_DEPTH', - 'GL_MAX_LAYERS', - 'GL_MAX_COMBINED_DIMENSIONS', - 'GL_COLOR_COMPONENTS', - 'GL_DEPTH_COMPONENTS', - 'GL_STENCIL_COMPONENTS', - 'GL_COLOR_RENDERABLE', - 'GL_DEPTH_RENDERABLE', - 'GL_STENCIL_RENDERABLE', - 'GL_FRAMEBUFFER_RENDERABLE', - 'GL_FRAMEBUFFER_RENDERABLE_LAYERED', - 'GL_FRAMEBUFFER_BLEND', - 'GL_READ_PIXELS', - 'GL_READ_PIXELS_FORMAT', - 'GL_READ_PIXELS_TYPE', - 'GL_TEXTURE_IMAGE_FORMAT', - 'GL_TEXTURE_IMAGE_TYPE', - 'GL_GET_TEXTURE_IMAGE_FORMAT', - 'GL_GET_TEXTURE_IMAGE_TYPE', - 'GL_MIPMAP', - 'GL_MANUAL_GENERATE_MIPMAP', - 'GL_AUTO_GENERATE_MIPMAP', - 'GL_COLOR_ENCODING', - 'GL_SRGB_READ', - 'GL_SRGB_WRITE', - 'GL_FILTER', - 'GL_VERTEX_TEXTURE', - 'GL_TESS_CONTROL_TEXTURE', - 'GL_TESS_EVALUATION_TEXTURE', - 'GL_GEOMETRY_TEXTURE', - 'GL_FRAGMENT_TEXTURE', - 'GL_COMPUTE_TEXTURE', - 'GL_TEXTURE_SHADOW', - 'GL_TEXTURE_GATHER', - 'GL_TEXTURE_GATHER_SHADOW', - 'GL_SHADER_IMAGE_LOAD', - 'GL_SHADER_IMAGE_STORE', - 'GL_SHADER_IMAGE_ATOMIC', - 'GL_IMAGE_TEXEL_SIZE', - 'GL_IMAGE_COMPATIBILITY_CLASS', - 'GL_IMAGE_PIXEL_FORMAT', - 'GL_IMAGE_PIXEL_TYPE', - 'GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST', - 'GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST', - 'GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE', - 'GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE', - 'GL_TEXTURE_COMPRESSED_BLOCK_WIDTH', - 'GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT', - 'GL_TEXTURE_COMPRESSED_BLOCK_SIZE', - 'GL_CLEAR_BUFFER', - 'GL_TEXTURE_VIEW', - 'GL_VIEW_COMPATIBILITY_CLASS', - 'GL_FULL_SUPPORT', - 'GL_CAVEAT_SUPPORT', - 'GL_IMAGE_CLASS_4_X_32', - 'GL_IMAGE_CLASS_2_X_32', - 'GL_IMAGE_CLASS_1_X_32', - 'GL_IMAGE_CLASS_4_X_16', - 'GL_IMAGE_CLASS_2_X_16', - 'GL_IMAGE_CLASS_1_X_16', - 'GL_IMAGE_CLASS_4_X_8', - 'GL_IMAGE_CLASS_2_X_8', - 'GL_IMAGE_CLASS_1_X_8', - 'GL_IMAGE_CLASS_11_11_10', - 'GL_IMAGE_CLASS_10_10_10_2', - 'GL_VIEW_CLASS_128_BITS', - 'GL_VIEW_CLASS_96_BITS', - 'GL_VIEW_CLASS_64_BITS', - 'GL_VIEW_CLASS_48_BITS', - 'GL_VIEW_CLASS_32_BITS', - 'GL_VIEW_CLASS_24_BITS', - 'GL_VIEW_CLASS_16_BITS', - 'GL_VIEW_CLASS_8_BITS', - 'GL_VIEW_CLASS_S3TC_DXT1_RGB', - 'GL_VIEW_CLASS_S3TC_DXT1_RGBA', - 'GL_VIEW_CLASS_S3TC_DXT3_RGBA', - 'GL_VIEW_CLASS_S3TC_DXT5_RGBA', - 'GL_VIEW_CLASS_RGTC1_RED', - 'GL_VIEW_CLASS_RGTC2_RG', - 'GL_VIEW_CLASS_BPTC_UNORM', - 'GL_VIEW_CLASS_BPTC_FLOAT', - 'GL_UNIFORM', - 'GL_UNIFORM_BLOCK', - 'GL_PROGRAM_INPUT', - 'GL_PROGRAM_OUTPUT', - 'GL_BUFFER_VARIABLE', - 'GL_SHADER_STORAGE_BLOCK', - 'GL_VERTEX_SUBROUTINE', - 'GL_TESS_CONTROL_SUBROUTINE', - 'GL_TESS_EVALUATION_SUBROUTINE', - 'GL_GEOMETRY_SUBROUTINE', - 'GL_FRAGMENT_SUBROUTINE', - 'GL_COMPUTE_SUBROUTINE', - 'GL_VERTEX_SUBROUTINE_UNIFORM', - 'GL_TESS_CONTROL_SUBROUTINE_UNIFORM', - 'GL_TESS_EVALUATION_SUBROUTINE_UNIFORM', - 'GL_GEOMETRY_SUBROUTINE_UNIFORM', - 'GL_FRAGMENT_SUBROUTINE_UNIFORM', - 'GL_COMPUTE_SUBROUTINE_UNIFORM', - 'GL_TRANSFORM_FEEDBACK_VARYING', - 'GL_ACTIVE_RESOURCES', - 'GL_MAX_NAME_LENGTH', - 'GL_MAX_NUM_ACTIVE_VARIABLES', - 'GL_MAX_NUM_COMPATIBLE_SUBROUTINES', - 'GL_NAME_LENGTH', - 'GL_TYPE', - 'GL_ARRAY_SIZE', - 'GL_OFFSET', - 'GL_BLOCK_INDEX', - 'GL_ARRAY_STRIDE', - 'GL_MATRIX_STRIDE', - 'GL_IS_ROW_MAJOR', - 'GL_ATOMIC_COUNTER_BUFFER_INDEX', - 'GL_BUFFER_BINDING', - 
'GL_BUFFER_DATA_SIZE', - 'GL_NUM_ACTIVE_VARIABLES', - 'GL_ACTIVE_VARIABLES', - 'GL_REFERENCED_BY_VERTEX_SHADER', - 'GL_REFERENCED_BY_TESS_CONTROL_SHADER', - 'GL_REFERENCED_BY_TESS_EVALUATION_SHADER', - 'GL_REFERENCED_BY_GEOMETRY_SHADER', - 'GL_REFERENCED_BY_FRAGMENT_SHADER', - 'GL_REFERENCED_BY_COMPUTE_SHADER', - 'GL_TOP_LEVEL_ARRAY_SIZE', - 'GL_TOP_LEVEL_ARRAY_STRIDE', - 'GL_LOCATION', - 'GL_LOCATION_INDEX', - 'GL_IS_PER_PATCH', - 'GL_SHADER_STORAGE_BUFFER', - 'GL_SHADER_STORAGE_BUFFER_BINDING', - 'GL_SHADER_STORAGE_BUFFER_START', - 'GL_SHADER_STORAGE_BUFFER_SIZE', - 'GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS', - 'GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS', - 'GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS', - 'GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS', - 'GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS', - 'GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS', - 'GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS', - 'GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS', - 'GL_MAX_SHADER_STORAGE_BLOCK_SIZE', - 'GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT', - 'GL_SHADER_STORAGE_BARRIER_BIT', - 'GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES', - 'GL_DEPTH_STENCIL_TEXTURE_MODE', - 'GL_TEXTURE_BUFFER_OFFSET', - 'GL_TEXTURE_BUFFER_SIZE', - 'GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT', - 'GL_TEXTURE_VIEW_MIN_LEVEL', - 'GL_TEXTURE_VIEW_NUM_LEVELS', - 'GL_TEXTURE_VIEW_MIN_LAYER', - 'GL_TEXTURE_VIEW_NUM_LAYERS', - 'GL_TEXTURE_IMMUTABLE_LEVELS', - 'GL_VERTEX_ATTRIB_BINDING', - 'GL_VERTEX_ATTRIB_RELATIVE_OFFSET', - 'GL_VERTEX_BINDING_DIVISOR', - 'GL_VERTEX_BINDING_OFFSET', - 'GL_VERTEX_BINDING_STRIDE', - 'GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET', - 'GL_MAX_VERTEX_ATTRIB_BINDINGS', - 'GL_VERTEX_BINDING_BUFFER', - 'GL_DISPLAY_LIST', - 'GL_MAX_VERTEX_ATTRIB_STRIDE', - 'GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED', - 'GL_TEXTURE_BUFFER_BINDING', - 'GL_MAP_PERSISTENT_BIT', - 'GL_MAP_COHERENT_BIT', - 'GL_DYNAMIC_STORAGE_BIT', - 'GL_CLIENT_STORAGE_BIT', - 'GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT', - 'GL_BUFFER_IMMUTABLE_STORAGE', - 'GL_BUFFER_STORAGE_FLAGS', - 'GL_CLEAR_TEXTURE', - 'GL_LOCATION_COMPONENT', - 'GL_TRANSFORM_FEEDBACK_BUFFER_INDEX', - 'GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE', - 'GL_QUERY_BUFFER', - 'GL_QUERY_BUFFER_BARRIER_BIT', - 'GL_QUERY_BUFFER_BINDING', - 'GL_QUERY_RESULT_NO_WAIT', - 'GL_MIRROR_CLAMP_TO_EDGE', - 'GL_CONTEXT_LOST', - 'GL_NEGATIVE_ONE_TO_ONE', - 'GL_ZERO_TO_ONE', - 'GL_CLIP_ORIGIN', - 'GL_CLIP_DEPTH_MODE', - 'GL_QUERY_WAIT_INVERTED', - 'GL_QUERY_NO_WAIT_INVERTED', - 'GL_QUERY_BY_REGION_WAIT_INVERTED', - 'GL_QUERY_BY_REGION_NO_WAIT_INVERTED', - 'GL_MAX_CULL_DISTANCES', - 'GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES', - 'GL_TEXTURE_TARGET', - 'GL_QUERY_TARGET', - 'GL_GUILTY_CONTEXT_RESET', - 'GL_INNOCENT_CONTEXT_RESET', - 'GL_UNKNOWN_CONTEXT_RESET', - 'GL_RESET_NOTIFICATION_STRATEGY', - 'GL_LOSE_CONTEXT_ON_RESET', - 'GL_NO_RESET_NOTIFICATION', - 'GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT', - 'GL_COLOR_TABLE', - 'GL_POST_CONVOLUTION_COLOR_TABLE', - 'GL_POST_COLOR_MATRIX_COLOR_TABLE', - 'GL_PROXY_COLOR_TABLE', - 'GL_PROXY_POST_CONVOLUTION_COLOR_TABLE', - 'GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE', - 'GL_CONVOLUTION_1D', - 'GL_CONVOLUTION_2D', - 'GL_SEPARABLE_2D', - 'GL_HISTOGRAM', - 'GL_PROXY_HISTOGRAM', - 'GL_MINMAX', - 'GL_CONTEXT_RELEASE_BEHAVIOR', - 'GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH', - 'GL_SHADER_BINARY_FORMAT_SPIR_V', - 'GL_SPIR_V_BINARY', - 'GL_PARAMETER_BUFFER', - 'GL_PARAMETER_BUFFER_BINDING', - 'GL_CONTEXT_FLAG_NO_ERROR_BIT', - 'GL_VERTICES_SUBMITTED', - 'GL_PRIMITIVES_SUBMITTED', - 'GL_VERTEX_SHADER_INVOCATIONS', - 'GL_TESS_CONTROL_SHADER_PATCHES', - 
'GL_TESS_EVALUATION_SHADER_INVOCATIONS', - 'GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED', - 'GL_FRAGMENT_SHADER_INVOCATIONS', - 'GL_COMPUTE_SHADER_INVOCATIONS', - 'GL_CLIPPING_INPUT_PRIMITIVES', - 'GL_CLIPPING_OUTPUT_PRIMITIVES', - 'GL_POLYGON_OFFSET_CLAMP', - 'GL_SPIR_V_EXTENSIONS', - 'GL_NUM_SPIR_V_EXTENSIONS', - 'GL_TEXTURE_MAX_ANISOTROPY', - 'GL_MAX_TEXTURE_MAX_ANISOTROPY', - 'GL_TRANSFORM_FEEDBACK_OVERFLOW', - 'GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW', - 'GL_MULTISAMPLE_ARB', - 'GL_SAMPLE_ALPHA_TO_COVERAGE_ARB', - 'GL_SAMPLE_ALPHA_TO_ONE_ARB', - 'GL_SAMPLE_COVERAGE_ARB', - 'GL_SAMPLE_BUFFERS_ARB', - 'GL_SAMPLES_ARB', - 'GL_SAMPLE_COVERAGE_VALUE_ARB', - 'GL_SAMPLE_COVERAGE_INVERT_ARB', - 'GL_MULTISAMPLE_BIT_ARB', - 'GL_COMPRESSED_RGB_S3TC_DXT1_EXT', - 'GL_COMPRESSED_RGBA_S3TC_DXT1_EXT', - 'GL_COMPRESSED_RGBA_S3TC_DXT3_EXT', - 'GL_COMPRESSED_RGBA_S3TC_DXT5_EXT', - 'GL_INVALID_FRAMEBUFFER_OPERATION_EXT', - 'GL_MAX_RENDERBUFFER_SIZE_EXT', - 'GL_FRAMEBUFFER_BINDING_EXT', - 'GL_RENDERBUFFER_BINDING_EXT', - 'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT', - 'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT', - 'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT', - 'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT', - 'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT', - 'GL_FRAMEBUFFER_COMPLETE_EXT', - 'GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT', - 'GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT', - 'GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT', - 'GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT', - 'GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT', - 'GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT', - 'GL_FRAMEBUFFER_UNSUPPORTED_EXT', - 'GL_MAX_COLOR_ATTACHMENTS_EXT', - 'GL_COLOR_ATTACHMENT0_EXT', - 'GL_COLOR_ATTACHMENT1_EXT', - 'GL_COLOR_ATTACHMENT2_EXT', - 'GL_COLOR_ATTACHMENT3_EXT', - 'GL_COLOR_ATTACHMENT4_EXT', - 'GL_COLOR_ATTACHMENT5_EXT', - 'GL_COLOR_ATTACHMENT6_EXT', - 'GL_COLOR_ATTACHMENT7_EXT', - 'GL_COLOR_ATTACHMENT8_EXT', - 'GL_COLOR_ATTACHMENT9_EXT', - 'GL_COLOR_ATTACHMENT10_EXT', - 'GL_COLOR_ATTACHMENT11_EXT', - 'GL_COLOR_ATTACHMENT12_EXT', - 'GL_COLOR_ATTACHMENT13_EXT', - 'GL_COLOR_ATTACHMENT14_EXT', - 'GL_COLOR_ATTACHMENT15_EXT', - 'GL_DEPTH_ATTACHMENT_EXT', - 'GL_STENCIL_ATTACHMENT_EXT', - 'GL_FRAMEBUFFER_EXT', - 'GL_RENDERBUFFER_EXT', - 'GL_RENDERBUFFER_WIDTH_EXT', - 'GL_RENDERBUFFER_HEIGHT_EXT', - 'GL_RENDERBUFFER_INTERNAL_FORMAT_EXT', - 'GL_STENCIL_INDEX1_EXT', - 'GL_STENCIL_INDEX4_EXT', - 'GL_STENCIL_INDEX8_EXT', - 'GL_STENCIL_INDEX16_EXT', - 'GL_RENDERBUFFER_RED_SIZE_EXT', - 'GL_RENDERBUFFER_GREEN_SIZE_EXT', - 'GL_RENDERBUFFER_BLUE_SIZE_EXT', - 'GL_RENDERBUFFER_ALPHA_SIZE_EXT', - 'GL_RENDERBUFFER_DEPTH_SIZE_EXT', - 'GL_RENDERBUFFER_STENCIL_SIZE_EXT', - 'glAccum', - 'glActiveShaderProgram', - 'glActiveTexture', - 'glAlphaFunc', - 'glAreTexturesResident', - 'glArrayElement', - 'glAttachShader', - 'glBegin', - 'glBeginConditionalRender', - 'glBeginQuery', - 'glBeginQueryIndexed', - 'glBeginTransformFeedback', - 'glBindAttribLocation', - 'glBindBuffer', - 'glBindBufferBase', - 'glBindBufferRange', - 'glBindBuffersBase', - 'glBindBuffersRange', - 'glBindFragDataLocation', - 'glBindFragDataLocationIndexed', - 'glBindFramebuffer', - 'glBindFramebufferEXT', - 'glBindImageTexture', - 'glBindImageTextures', - 'glBindProgramPipeline', - 'glBindRenderbuffer', - 'glBindRenderbufferEXT', - 'glBindSampler', - 'glBindSamplers', - 'glBindTexture', - 'glBindTextureUnit', - 'glBindTextures', - 'glBindTransformFeedback', - 'glBindVertexArray', - 'glBindVertexBuffer', - 'glBindVertexBuffers', - 'glBitmap', - 'glBlendColor', - 
'glBlendEquation', - 'glBlendEquationSeparate', - 'glBlendEquationSeparatei', - 'glBlendEquationi', - 'glBlendFunc', - 'glBlendFuncSeparate', - 'glBlendFuncSeparatei', - 'glBlendFunci', - 'glBlitFramebuffer', - 'glBlitNamedFramebuffer', - 'glBufferData', - 'glBufferStorage', - 'glBufferSubData', - 'glCallList', - 'glCallLists', - 'glCheckFramebufferStatus', - 'glCheckFramebufferStatusEXT', - 'glCheckNamedFramebufferStatus', - 'glClampColor', - 'glClear', - 'glClearAccum', - 'glClearBufferData', - 'glClearBufferSubData', - 'glClearBufferfi', - 'glClearBufferfv', - 'glClearBufferiv', - 'glClearBufferuiv', - 'glClearColor', - 'glClearDepth', - 'glClearDepthf', - 'glClearIndex', - 'glClearNamedBufferData', - 'glClearNamedBufferSubData', - 'glClearNamedFramebufferfi', - 'glClearNamedFramebufferfv', - 'glClearNamedFramebufferiv', - 'glClearNamedFramebufferuiv', - 'glClearStencil', - 'glClearTexImage', - 'glClearTexSubImage', - 'glClientActiveTexture', - 'glClientWaitSync', - 'glClipControl', - 'glClipPlane', - 'glColor3b', - 'glColor3bv', - 'glColor3d', - 'glColor3dv', - 'glColor3f', - 'glColor3fv', - 'glColor3i', - 'glColor3iv', - 'glColor3s', - 'glColor3sv', - 'glColor3ub', - 'glColor3ubv', - 'glColor3ui', - 'glColor3uiv', - 'glColor3us', - 'glColor3usv', - 'glColor4b', - 'glColor4bv', - 'glColor4d', - 'glColor4dv', - 'glColor4f', - 'glColor4fv', - 'glColor4i', - 'glColor4iv', - 'glColor4s', - 'glColor4sv', - 'glColor4ub', - 'glColor4ubv', - 'glColor4ui', - 'glColor4uiv', - 'glColor4us', - 'glColor4usv', - 'glColorMask', - 'glColorMaski', - 'glColorMaterial', - 'glColorP3ui', - 'glColorP3uiv', - 'glColorP4ui', - 'glColorP4uiv', - 'glColorPointer', - 'glCompileShader', - 'glCompressedTexImage1D', - 'glCompressedTexImage2D', - 'glCompressedTexImage3D', - 'glCompressedTexSubImage1D', - 'glCompressedTexSubImage2D', - 'glCompressedTexSubImage3D', - 'glCompressedTextureSubImage1D', - 'glCompressedTextureSubImage2D', - 'glCompressedTextureSubImage3D', - 'glCopyBufferSubData', - 'glCopyImageSubData', - 'glCopyNamedBufferSubData', - 'glCopyPixels', - 'glCopyTexImage1D', - 'glCopyTexImage2D', - 'glCopyTexSubImage1D', - 'glCopyTexSubImage2D', - 'glCopyTexSubImage3D', - 'glCopyTextureSubImage1D', - 'glCopyTextureSubImage2D', - 'glCopyTextureSubImage3D', - 'glCreateBuffers', - 'glCreateFramebuffers', - 'glCreateProgram', - 'glCreateProgramPipelines', - 'glCreateQueries', - 'glCreateRenderbuffers', - 'glCreateSamplers', - 'glCreateShader', - 'glCreateShaderProgramv', - 'glCreateTextures', - 'glCreateTransformFeedbacks', - 'glCreateVertexArrays', - 'glCullFace', - 'glDebugMessageCallback', - 'glDebugMessageControl', - 'glDebugMessageInsert', - 'glDeleteBuffers', - 'glDeleteFramebuffers', - 'glDeleteFramebuffersEXT', - 'glDeleteLists', - 'glDeleteProgram', - 'glDeleteProgramPipelines', - 'glDeleteQueries', - 'glDeleteRenderbuffers', - 'glDeleteRenderbuffersEXT', - 'glDeleteSamplers', - 'glDeleteShader', - 'glDeleteSync', - 'glDeleteTextures', - 'glDeleteTransformFeedbacks', - 'glDeleteVertexArrays', - 'glDepthFunc', - 'glDepthMask', - 'glDepthRange', - 'glDepthRangeArrayv', - 'glDepthRangeIndexed', - 'glDepthRangef', - 'glDetachShader', - 'glDisable', - 'glDisableClientState', - 'glDisableVertexArrayAttrib', - 'glDisableVertexAttribArray', - 'glDisablei', - 'glDispatchCompute', - 'glDispatchComputeIndirect', - 'glDrawArrays', - 'glDrawArraysIndirect', - 'glDrawArraysInstanced', - 'glDrawArraysInstancedBaseInstance', - 'glDrawBuffer', - 'glDrawBuffers', - 'glDrawElements', - 'glDrawElementsBaseVertex', - 
'glDrawElementsIndirect', - 'glDrawElementsInstanced', - 'glDrawElementsInstancedBaseInstance', - 'glDrawElementsInstancedBaseVertex', - 'glDrawElementsInstancedBaseVertexBaseInstance', - 'glDrawPixels', - 'glDrawRangeElements', - 'glDrawRangeElementsBaseVertex', - 'glDrawTransformFeedback', - 'glDrawTransformFeedbackInstanced', - 'glDrawTransformFeedbackStream', - 'glDrawTransformFeedbackStreamInstanced', - 'glEdgeFlag', - 'glEdgeFlagPointer', - 'glEdgeFlagv', - 'glEnable', - 'glEnableClientState', - 'glEnableVertexArrayAttrib', - 'glEnableVertexAttribArray', - 'glEnablei', - 'glEnd', - 'glEndConditionalRender', - 'glEndList', - 'glEndQuery', - 'glEndQueryIndexed', - 'glEndTransformFeedback', - 'glEvalCoord1d', - 'glEvalCoord1dv', - 'glEvalCoord1f', - 'glEvalCoord1fv', - 'glEvalCoord2d', - 'glEvalCoord2dv', - 'glEvalCoord2f', - 'glEvalCoord2fv', - 'glEvalMesh1', - 'glEvalMesh2', - 'glEvalPoint1', - 'glEvalPoint2', - 'glFeedbackBuffer', - 'glFenceSync', - 'glFinish', - 'glFlush', - 'glFlushMappedBufferRange', - 'glFlushMappedNamedBufferRange', - 'glFogCoordPointer', - 'glFogCoordd', - 'glFogCoorddv', - 'glFogCoordf', - 'glFogCoordfv', - 'glFogf', - 'glFogfv', - 'glFogi', - 'glFogiv', - 'glFramebufferParameteri', - 'glFramebufferRenderbuffer', - 'glFramebufferRenderbufferEXT', - 'glFramebufferTexture', - 'glFramebufferTexture1D', - 'glFramebufferTexture1DEXT', - 'glFramebufferTexture2D', - 'glFramebufferTexture2DEXT', - 'glFramebufferTexture3D', - 'glFramebufferTexture3DEXT', - 'glFramebufferTextureLayer', - 'glFrontFace', - 'glFrustum', - 'glGenBuffers', - 'glGenFramebuffers', - 'glGenFramebuffersEXT', - 'glGenLists', - 'glGenProgramPipelines', - 'glGenQueries', - 'glGenRenderbuffers', - 'glGenRenderbuffersEXT', - 'glGenSamplers', - 'glGenTextures', - 'glGenTransformFeedbacks', - 'glGenVertexArrays', - 'glGenerateMipmap', - 'glGenerateMipmapEXT', - 'glGenerateTextureMipmap', - 'glGetActiveAtomicCounterBufferiv', - 'glGetActiveAttrib', - 'glGetActiveSubroutineName', - 'glGetActiveSubroutineUniformName', - 'glGetActiveSubroutineUniformiv', - 'glGetActiveUniform', - 'glGetActiveUniformBlockName', - 'glGetActiveUniformBlockiv', - 'glGetActiveUniformName', - 'glGetActiveUniformsiv', - 'glGetAttachedShaders', - 'glGetAttribLocation', - 'glGetBooleani_v', - 'glGetBooleanv', - 'glGetBufferParameteri64v', - 'glGetBufferParameteriv', - 'glGetBufferPointerv', - 'glGetBufferSubData', - 'glGetClipPlane', - 'glGetCompressedTexImage', - 'glGetCompressedTextureImage', - 'glGetCompressedTextureSubImage', - 'glGetDebugMessageLog', - 'glGetDoublei_v', - 'glGetDoublev', - 'glGetError', - 'glGetFloati_v', - 'glGetFloatv', - 'glGetFragDataIndex', - 'glGetFragDataLocation', - 'glGetFramebufferAttachmentParameteriv', - 'glGetFramebufferAttachmentParameterivEXT', - 'glGetFramebufferParameteriv', - 'glGetGraphicsResetStatus', - 'glGetInteger64i_v', - 'glGetInteger64v', - 'glGetIntegeri_v', - 'glGetIntegerv', - 'glGetInternalformati64v', - 'glGetInternalformativ', - 'glGetLightfv', - 'glGetLightiv', - 'glGetMapdv', - 'glGetMapfv', - 'glGetMapiv', - 'glGetMaterialfv', - 'glGetMaterialiv', - 'glGetMultisamplefv', - 'glGetNamedBufferParameteri64v', - 'glGetNamedBufferParameteriv', - 'glGetNamedBufferPointerv', - 'glGetNamedBufferSubData', - 'glGetNamedFramebufferAttachmentParameteriv', - 'glGetNamedFramebufferParameteriv', - 'glGetNamedRenderbufferParameteriv', - 'glGetObjectLabel', - 'glGetObjectPtrLabel', - 'glGetPixelMapfv', - 'glGetPixelMapuiv', - 'glGetPixelMapusv', - 'glGetPointerv', - 'glGetPolygonStipple', - 
'glGetProgramBinary', - 'glGetProgramInfoLog', - 'glGetProgramInterfaceiv', - 'glGetProgramPipelineInfoLog', - 'glGetProgramPipelineiv', - 'glGetProgramResourceIndex', - 'glGetProgramResourceLocation', - 'glGetProgramResourceLocationIndex', - 'glGetProgramResourceName', - 'glGetProgramResourceiv', - 'glGetProgramStageiv', - 'glGetProgramiv', - 'glGetQueryBufferObjecti64v', - 'glGetQueryBufferObjectiv', - 'glGetQueryBufferObjectui64v', - 'glGetQueryBufferObjectuiv', - 'glGetQueryIndexediv', - 'glGetQueryObjecti64v', - 'glGetQueryObjectiv', - 'glGetQueryObjectui64v', - 'glGetQueryObjectuiv', - 'glGetQueryiv', - 'glGetRenderbufferParameteriv', - 'glGetRenderbufferParameterivEXT', - 'glGetSamplerParameterIiv', - 'glGetSamplerParameterIuiv', - 'glGetSamplerParameterfv', - 'glGetSamplerParameteriv', - 'glGetShaderInfoLog', - 'glGetShaderPrecisionFormat', - 'glGetShaderSource', - 'glGetShaderiv', - 'glGetString', - 'glGetStringi', - 'glGetSubroutineIndex', - 'glGetSubroutineUniformLocation', - 'glGetSynciv', - 'glGetTexEnvfv', - 'glGetTexEnviv', - 'glGetTexGendv', - 'glGetTexGenfv', - 'glGetTexGeniv', - 'glGetTexImage', - 'glGetTexLevelParameterfv', - 'glGetTexLevelParameteriv', - 'glGetTexParameterIiv', - 'glGetTexParameterIuiv', - 'glGetTexParameterfv', - 'glGetTexParameteriv', - 'glGetTextureImage', - 'glGetTextureLevelParameterfv', - 'glGetTextureLevelParameteriv', - 'glGetTextureParameterIiv', - 'glGetTextureParameterIuiv', - 'glGetTextureParameterfv', - 'glGetTextureParameteriv', - 'glGetTextureSubImage', - 'glGetTransformFeedbackVarying', - 'glGetTransformFeedbacki64_v', - 'glGetTransformFeedbacki_v', - 'glGetTransformFeedbackiv', - 'glGetUniformBlockIndex', - 'glGetUniformIndices', - 'glGetUniformLocation', - 'glGetUniformSubroutineuiv', - 'glGetUniformdv', - 'glGetUniformfv', - 'glGetUniformiv', - 'glGetUniformuiv', - 'glGetVertexArrayIndexed64iv', - 'glGetVertexArrayIndexediv', - 'glGetVertexArrayiv', - 'glGetVertexAttribIiv', - 'glGetVertexAttribIuiv', - 'glGetVertexAttribLdv', - 'glGetVertexAttribPointerv', - 'glGetVertexAttribdv', - 'glGetVertexAttribfv', - 'glGetVertexAttribiv', - 'glGetnColorTable', - 'glGetnCompressedTexImage', - 'glGetnConvolutionFilter', - 'glGetnHistogram', - 'glGetnMapdv', - 'glGetnMapfv', - 'glGetnMapiv', - 'glGetnMinmax', - 'glGetnPixelMapfv', - 'glGetnPixelMapuiv', - 'glGetnPixelMapusv', - 'glGetnPolygonStipple', - 'glGetnSeparableFilter', - 'glGetnTexImage', - 'glGetnUniformdv', - 'glGetnUniformfv', - 'glGetnUniformiv', - 'glGetnUniformuiv', - 'glHint', - 'glIndexMask', - 'glIndexPointer', - 'glIndexd', - 'glIndexdv', - 'glIndexf', - 'glIndexfv', - 'glIndexi', - 'glIndexiv', - 'glIndexs', - 'glIndexsv', - 'glIndexub', - 'glIndexubv', - 'glInitNames', - 'glInterleavedArrays', - 'glInvalidateBufferData', - 'glInvalidateBufferSubData', - 'glInvalidateFramebuffer', - 'glInvalidateNamedFramebufferData', - 'glInvalidateNamedFramebufferSubData', - 'glInvalidateSubFramebuffer', - 'glInvalidateTexImage', - 'glInvalidateTexSubImage', - 'glIsBuffer', - 'glIsEnabled', - 'glIsEnabledi', - 'glIsFramebuffer', - 'glIsFramebufferEXT', - 'glIsList', - 'glIsProgram', - 'glIsProgramPipeline', - 'glIsQuery', - 'glIsRenderbuffer', - 'glIsRenderbufferEXT', - 'glIsSampler', - 'glIsShader', - 'glIsSync', - 'glIsTexture', - 'glIsTransformFeedback', - 'glIsVertexArray', - 'glLightModelf', - 'glLightModelfv', - 'glLightModeli', - 'glLightModeliv', - 'glLightf', - 'glLightfv', - 'glLighti', - 'glLightiv', - 'glLineStipple', - 'glLineWidth', - 'glLinkProgram', - 'glListBase', - 
'glLoadIdentity', - 'glLoadMatrixd', - 'glLoadMatrixf', - 'glLoadName', - 'glLoadTransposeMatrixd', - 'glLoadTransposeMatrixf', - 'glLogicOp', - 'glMap1d', - 'glMap1f', - 'glMap2d', - 'glMap2f', - 'glMapBuffer', - 'glMapBufferRange', - 'glMapGrid1d', - 'glMapGrid1f', - 'glMapGrid2d', - 'glMapGrid2f', - 'glMapNamedBuffer', - 'glMapNamedBufferRange', - 'glMaterialf', - 'glMaterialfv', - 'glMateriali', - 'glMaterialiv', - 'glMatrixMode', - 'glMemoryBarrier', - 'glMemoryBarrierByRegion', - 'glMinSampleShading', - 'glMultMatrixd', - 'glMultMatrixf', - 'glMultTransposeMatrixd', - 'glMultTransposeMatrixf', - 'glMultiDrawArrays', - 'glMultiDrawArraysIndirect', - 'glMultiDrawArraysIndirectCount', - 'glMultiDrawElements', - 'glMultiDrawElementsBaseVertex', - 'glMultiDrawElementsIndirect', - 'glMultiDrawElementsIndirectCount', - 'glMultiTexCoord1d', - 'glMultiTexCoord1dv', - 'glMultiTexCoord1f', - 'glMultiTexCoord1fv', - 'glMultiTexCoord1i', - 'glMultiTexCoord1iv', - 'glMultiTexCoord1s', - 'glMultiTexCoord1sv', - 'glMultiTexCoord2d', - 'glMultiTexCoord2dv', - 'glMultiTexCoord2f', - 'glMultiTexCoord2fv', - 'glMultiTexCoord2i', - 'glMultiTexCoord2iv', - 'glMultiTexCoord2s', - 'glMultiTexCoord2sv', - 'glMultiTexCoord3d', - 'glMultiTexCoord3dv', - 'glMultiTexCoord3f', - 'glMultiTexCoord3fv', - 'glMultiTexCoord3i', - 'glMultiTexCoord3iv', - 'glMultiTexCoord3s', - 'glMultiTexCoord3sv', - 'glMultiTexCoord4d', - 'glMultiTexCoord4dv', - 'glMultiTexCoord4f', - 'glMultiTexCoord4fv', - 'glMultiTexCoord4i', - 'glMultiTexCoord4iv', - 'glMultiTexCoord4s', - 'glMultiTexCoord4sv', - 'glMultiTexCoordP1ui', - 'glMultiTexCoordP1uiv', - 'glMultiTexCoordP2ui', - 'glMultiTexCoordP2uiv', - 'glMultiTexCoordP3ui', - 'glMultiTexCoordP3uiv', - 'glMultiTexCoordP4ui', - 'glMultiTexCoordP4uiv', - 'glNamedBufferData', - 'glNamedBufferStorage', - 'glNamedBufferSubData', - 'glNamedFramebufferDrawBuffer', - 'glNamedFramebufferDrawBuffers', - 'glNamedFramebufferParameteri', - 'glNamedFramebufferReadBuffer', - 'glNamedFramebufferRenderbuffer', - 'glNamedFramebufferTexture', - 'glNamedFramebufferTextureLayer', - 'glNamedRenderbufferStorage', - 'glNamedRenderbufferStorageMultisample', - 'glNewList', - 'glNormal3b', - 'glNormal3bv', - 'glNormal3d', - 'glNormal3dv', - 'glNormal3f', - 'glNormal3fv', - 'glNormal3i', - 'glNormal3iv', - 'glNormal3s', - 'glNormal3sv', - 'glNormalP3ui', - 'glNormalP3uiv', - 'glNormalPointer', - 'glObjectLabel', - 'glObjectPtrLabel', - 'glOrtho', - 'glPassThrough', - 'glPatchParameterfv', - 'glPatchParameteri', - 'glPauseTransformFeedback', - 'glPixelMapfv', - 'glPixelMapuiv', - 'glPixelMapusv', - 'glPixelStoref', - 'glPixelStorei', - 'glPixelTransferf', - 'glPixelTransferi', - 'glPixelZoom', - 'glPointParameterf', - 'glPointParameterfv', - 'glPointParameteri', - 'glPointParameteriv', - 'glPointSize', - 'glPolygonMode', - 'glPolygonOffset', - 'glPolygonOffsetClamp', - 'glPolygonStipple', - 'glPopAttrib', - 'glPopClientAttrib', - 'glPopDebugGroup', - 'glPopMatrix', - 'glPopName', - 'glPrimitiveRestartIndex', - 'glPrioritizeTextures', - 'glProgramBinary', - 'glProgramParameteri', - 'glProgramUniform1d', - 'glProgramUniform1dv', - 'glProgramUniform1f', - 'glProgramUniform1fv', - 'glProgramUniform1i', - 'glProgramUniform1iv', - 'glProgramUniform1ui', - 'glProgramUniform1uiv', - 'glProgramUniform2d', - 'glProgramUniform2dv', - 'glProgramUniform2f', - 'glProgramUniform2fv', - 'glProgramUniform2i', - 'glProgramUniform2iv', - 'glProgramUniform2ui', - 'glProgramUniform2uiv', - 'glProgramUniform3d', - 'glProgramUniform3dv', 
- 'glProgramUniform3f', - 'glProgramUniform3fv', - 'glProgramUniform3i', - 'glProgramUniform3iv', - 'glProgramUniform3ui', - 'glProgramUniform3uiv', - 'glProgramUniform4d', - 'glProgramUniform4dv', - 'glProgramUniform4f', - 'glProgramUniform4fv', - 'glProgramUniform4i', - 'glProgramUniform4iv', - 'glProgramUniform4ui', - 'glProgramUniform4uiv', - 'glProgramUniformMatrix2dv', - 'glProgramUniformMatrix2fv', - 'glProgramUniformMatrix2x3dv', - 'glProgramUniformMatrix2x3fv', - 'glProgramUniformMatrix2x4dv', - 'glProgramUniformMatrix2x4fv', - 'glProgramUniformMatrix3dv', - 'glProgramUniformMatrix3fv', - 'glProgramUniformMatrix3x2dv', - 'glProgramUniformMatrix3x2fv', - 'glProgramUniformMatrix3x4dv', - 'glProgramUniformMatrix3x4fv', - 'glProgramUniformMatrix4dv', - 'glProgramUniformMatrix4fv', - 'glProgramUniformMatrix4x2dv', - 'glProgramUniformMatrix4x2fv', - 'glProgramUniformMatrix4x3dv', - 'glProgramUniformMatrix4x3fv', - 'glProvokingVertex', - 'glPushAttrib', - 'glPushClientAttrib', - 'glPushDebugGroup', - 'glPushMatrix', - 'glPushName', - 'glQueryCounter', - 'glRasterPos2d', - 'glRasterPos2dv', - 'glRasterPos2f', - 'glRasterPos2fv', - 'glRasterPos2i', - 'glRasterPos2iv', - 'glRasterPos2s', - 'glRasterPos2sv', - 'glRasterPos3d', - 'glRasterPos3dv', - 'glRasterPos3f', - 'glRasterPos3fv', - 'glRasterPos3i', - 'glRasterPos3iv', - 'glRasterPos3s', - 'glRasterPos3sv', - 'glRasterPos4d', - 'glRasterPos4dv', - 'glRasterPos4f', - 'glRasterPos4fv', - 'glRasterPos4i', - 'glRasterPos4iv', - 'glRasterPos4s', - 'glRasterPos4sv', - 'glReadBuffer', - 'glReadPixels', - 'glReadnPixels', - 'glRectd', - 'glRectdv', - 'glRectf', - 'glRectfv', - 'glRecti', - 'glRectiv', - 'glRects', - 'glRectsv', - 'glReleaseShaderCompiler', - 'glRenderMode', - 'glRenderbufferStorage', - 'glRenderbufferStorageEXT', - 'glRenderbufferStorageMultisample', - 'glResumeTransformFeedback', - 'glRotated', - 'glRotatef', - 'glSampleCoverage', - 'glSampleCoverageARB', - 'glSampleMaski', - 'glSamplerParameterIiv', - 'glSamplerParameterIuiv', - 'glSamplerParameterf', - 'glSamplerParameterfv', - 'glSamplerParameteri', - 'glSamplerParameteriv', - 'glScaled', - 'glScalef', - 'glScissor', - 'glScissorArrayv', - 'glScissorIndexed', - 'glScissorIndexedv', - 'glSecondaryColor3b', - 'glSecondaryColor3bv', - 'glSecondaryColor3d', - 'glSecondaryColor3dv', - 'glSecondaryColor3f', - 'glSecondaryColor3fv', - 'glSecondaryColor3i', - 'glSecondaryColor3iv', - 'glSecondaryColor3s', - 'glSecondaryColor3sv', - 'glSecondaryColor3ub', - 'glSecondaryColor3ubv', - 'glSecondaryColor3ui', - 'glSecondaryColor3uiv', - 'glSecondaryColor3us', - 'glSecondaryColor3usv', - 'glSecondaryColorP3ui', - 'glSecondaryColorP3uiv', - 'glSecondaryColorPointer', - 'glSelectBuffer', - 'glShadeModel', - 'glShaderBinary', - 'glShaderSource', - 'glShaderStorageBlockBinding', - 'glSpecializeShader', - 'glStencilFunc', - 'glStencilFuncSeparate', - 'glStencilMask', - 'glStencilMaskSeparate', - 'glStencilOp', - 'glStencilOpSeparate', - 'glTexBuffer', - 'glTexBufferRange', - 'glTexCoord1d', - 'glTexCoord1dv', - 'glTexCoord1f', - 'glTexCoord1fv', - 'glTexCoord1i', - 'glTexCoord1iv', - 'glTexCoord1s', - 'glTexCoord1sv', - 'glTexCoord2d', - 'glTexCoord2dv', - 'glTexCoord2f', - 'glTexCoord2fv', - 'glTexCoord2i', - 'glTexCoord2iv', - 'glTexCoord2s', - 'glTexCoord2sv', - 'glTexCoord3d', - 'glTexCoord3dv', - 'glTexCoord3f', - 'glTexCoord3fv', - 'glTexCoord3i', - 'glTexCoord3iv', - 'glTexCoord3s', - 'glTexCoord3sv', - 'glTexCoord4d', - 'glTexCoord4dv', - 'glTexCoord4f', - 'glTexCoord4fv', - 
'glTexCoord4i', - 'glTexCoord4iv', - 'glTexCoord4s', - 'glTexCoord4sv', - 'glTexCoordP1ui', - 'glTexCoordP1uiv', - 'glTexCoordP2ui', - 'glTexCoordP2uiv', - 'glTexCoordP3ui', - 'glTexCoordP3uiv', - 'glTexCoordP4ui', - 'glTexCoordP4uiv', - 'glTexCoordPointer', - 'glTexEnvf', - 'glTexEnvfv', - 'glTexEnvi', - 'glTexEnviv', - 'glTexGend', - 'glTexGendv', - 'glTexGenf', - 'glTexGenfv', - 'glTexGeni', - 'glTexGeniv', - 'glTexImage1D', - 'glTexImage2D', - 'glTexImage2DMultisample', - 'glTexImage3D', - 'glTexImage3DMultisample', - 'glTexParameterIiv', - 'glTexParameterIuiv', - 'glTexParameterf', - 'glTexParameterfv', - 'glTexParameteri', - 'glTexParameteriv', - 'glTexStorage1D', - 'glTexStorage2D', - 'glTexStorage2DMultisample', - 'glTexStorage3D', - 'glTexStorage3DMultisample', - 'glTexSubImage1D', - 'glTexSubImage2D', - 'glTexSubImage3D', - 'glTextureBarrier', - 'glTextureBuffer', - 'glTextureBufferRange', - 'glTextureParameterIiv', - 'glTextureParameterIuiv', - 'glTextureParameterf', - 'glTextureParameterfv', - 'glTextureParameteri', - 'glTextureParameteriv', - 'glTextureStorage1D', - 'glTextureStorage2D', - 'glTextureStorage2DMultisample', - 'glTextureStorage3D', - 'glTextureStorage3DMultisample', - 'glTextureSubImage1D', - 'glTextureSubImage2D', - 'glTextureSubImage3D', - 'glTextureView', - 'glTransformFeedbackBufferBase', - 'glTransformFeedbackBufferRange', - 'glTransformFeedbackVaryings', - 'glTranslated', - 'glTranslatef', - 'glUniform1d', - 'glUniform1dv', - 'glUniform1f', - 'glUniform1fv', - 'glUniform1i', - 'glUniform1iv', - 'glUniform1ui', - 'glUniform1uiv', - 'glUniform2d', - 'glUniform2dv', - 'glUniform2f', - 'glUniform2fv', - 'glUniform2i', - 'glUniform2iv', - 'glUniform2ui', - 'glUniform2uiv', - 'glUniform3d', - 'glUniform3dv', - 'glUniform3f', - 'glUniform3fv', - 'glUniform3i', - 'glUniform3iv', - 'glUniform3ui', - 'glUniform3uiv', - 'glUniform4d', - 'glUniform4dv', - 'glUniform4f', - 'glUniform4fv', - 'glUniform4i', - 'glUniform4iv', - 'glUniform4ui', - 'glUniform4uiv', - 'glUniformBlockBinding', - 'glUniformMatrix2dv', - 'glUniformMatrix2fv', - 'glUniformMatrix2x3dv', - 'glUniformMatrix2x3fv', - 'glUniformMatrix2x4dv', - 'glUniformMatrix2x4fv', - 'glUniformMatrix3dv', - 'glUniformMatrix3fv', - 'glUniformMatrix3x2dv', - 'glUniformMatrix3x2fv', - 'glUniformMatrix3x4dv', - 'glUniformMatrix3x4fv', - 'glUniformMatrix4dv', - 'glUniformMatrix4fv', - 'glUniformMatrix4x2dv', - 'glUniformMatrix4x2fv', - 'glUniformMatrix4x3dv', - 'glUniformMatrix4x3fv', - 'glUniformSubroutinesuiv', - 'glUnmapBuffer', - 'glUnmapNamedBuffer', - 'glUseProgram', - 'glUseProgramStages', - 'glValidateProgram', - 'glValidateProgramPipeline', - 'glVertex2d', - 'glVertex2dv', - 'glVertex2f', - 'glVertex2fv', - 'glVertex2i', - 'glVertex2iv', - 'glVertex2s', - 'glVertex2sv', - 'glVertex3d', - 'glVertex3dv', - 'glVertex3f', - 'glVertex3fv', - 'glVertex3i', - 'glVertex3iv', - 'glVertex3s', - 'glVertex3sv', - 'glVertex4d', - 'glVertex4dv', - 'glVertex4f', - 'glVertex4fv', - 'glVertex4i', - 'glVertex4iv', - 'glVertex4s', - 'glVertex4sv', - 'glVertexArrayAttribBinding', - 'glVertexArrayAttribFormat', - 'glVertexArrayAttribIFormat', - 'glVertexArrayAttribLFormat', - 'glVertexArrayBindingDivisor', - 'glVertexArrayElementBuffer', - 'glVertexArrayVertexBuffer', - 'glVertexArrayVertexBuffers', - 'glVertexAttrib1d', - 'glVertexAttrib1dv', - 'glVertexAttrib1f', - 'glVertexAttrib1fv', - 'glVertexAttrib1s', - 'glVertexAttrib1sv', - 'glVertexAttrib2d', - 'glVertexAttrib2dv', - 'glVertexAttrib2f', - 'glVertexAttrib2fv', - 
'glVertexAttrib2s', - 'glVertexAttrib2sv', - 'glVertexAttrib3d', - 'glVertexAttrib3dv', - 'glVertexAttrib3f', - 'glVertexAttrib3fv', - 'glVertexAttrib3s', - 'glVertexAttrib3sv', - 'glVertexAttrib4Nbv', - 'glVertexAttrib4Niv', - 'glVertexAttrib4Nsv', - 'glVertexAttrib4Nub', - 'glVertexAttrib4Nubv', - 'glVertexAttrib4Nuiv', - 'glVertexAttrib4Nusv', - 'glVertexAttrib4bv', - 'glVertexAttrib4d', - 'glVertexAttrib4dv', - 'glVertexAttrib4f', - 'glVertexAttrib4fv', - 'glVertexAttrib4iv', - 'glVertexAttrib4s', - 'glVertexAttrib4sv', - 'glVertexAttrib4ubv', - 'glVertexAttrib4uiv', - 'glVertexAttrib4usv', - 'glVertexAttribBinding', - 'glVertexAttribDivisor', - 'glVertexAttribFormat', - 'glVertexAttribI1i', - 'glVertexAttribI1iv', - 'glVertexAttribI1ui', - 'glVertexAttribI1uiv', - 'glVertexAttribI2i', - 'glVertexAttribI2iv', - 'glVertexAttribI2ui', - 'glVertexAttribI2uiv', - 'glVertexAttribI3i', - 'glVertexAttribI3iv', - 'glVertexAttribI3ui', - 'glVertexAttribI3uiv', - 'glVertexAttribI4bv', - 'glVertexAttribI4i', - 'glVertexAttribI4iv', - 'glVertexAttribI4sv', - 'glVertexAttribI4ubv', - 'glVertexAttribI4ui', - 'glVertexAttribI4uiv', - 'glVertexAttribI4usv', - 'glVertexAttribIFormat', - 'glVertexAttribIPointer', - 'glVertexAttribL1d', - 'glVertexAttribL1dv', - 'glVertexAttribL2d', - 'glVertexAttribL2dv', - 'glVertexAttribL3d', - 'glVertexAttribL3dv', - 'glVertexAttribL4d', - 'glVertexAttribL4dv', - 'glVertexAttribLFormat', - 'glVertexAttribLPointer', - 'glVertexAttribP1ui', - 'glVertexAttribP1uiv', - 'glVertexAttribP2ui', - 'glVertexAttribP2uiv', - 'glVertexAttribP3ui', - 'glVertexAttribP3uiv', - 'glVertexAttribP4ui', - 'glVertexAttribP4uiv', - 'glVertexAttribPointer', - 'glVertexBindingDivisor', - 'glVertexP2ui', - 'glVertexP2uiv', - 'glVertexP3ui', - 'glVertexP3uiv', - 'glVertexP4ui', - 'glVertexP4uiv', - 'glVertexPointer', - 'glViewport', - 'glViewportArrayv', - 'glViewportIndexedf', - 'glViewportIndexedfv', - 'glWaitSync', - 'glWindowPos2d', - 'glWindowPos2dv', - 'glWindowPos2f', - 'glWindowPos2fv', - 'glWindowPos2i', - 'glWindowPos2iv', - 'glWindowPos2s', - 'glWindowPos2sv', - 'glWindowPos3d', - 'glWindowPos3dv', - 'glWindowPos3f', - 'glWindowPos3fv', - 'glWindowPos3i', - 'glWindowPos3iv', - 'glWindowPos3s', - 'glWindowPos3sv', -] diff --git a/spaces/aijack/jojo/e4e/editings/latent_editor.py b/spaces/aijack/jojo/e4e/editings/latent_editor.py deleted file mode 100644 index 4bebca2f5c86f71b58fa1f30d24bfcb0da06d88f..0000000000000000000000000000000000000000 --- a/spaces/aijack/jojo/e4e/editings/latent_editor.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -import sys -sys.path.append(".") -sys.path.append("..") -from editings import ganspace, sefa -from utils.common import tensor2im - - -class LatentEditor(object): - def __init__(self, stylegan_generator, is_cars=False): - self.generator = stylegan_generator - self.is_cars = is_cars # Since the cars StyleGAN output is 384x512, there is a need to crop the 512x512 output. - - def apply_ganspace(self, latent, ganspace_pca, edit_directions): - edit_latents = ganspace.edit(latent, ganspace_pca, edit_directions) - return self._latents_to_image(edit_latents) - - def apply_interfacegan(self, latent, direction, factor=1, factor_range=None): - edit_latents = [] - if factor_range is not None: # Apply a range of editing factors. 
for example, (-5, 5) - for f in range(*factor_range): - edit_latent = latent + f * direction - edit_latents.append(edit_latent) - edit_latents = torch.cat(edit_latents) - else: - edit_latents = latent + factor * direction - return self._latents_to_image(edit_latents) - - def apply_sefa(self, latent, indices=[2, 3, 4, 5], **kwargs): - edit_latents = sefa.edit(self.generator, latent, indices, **kwargs) - return self._latents_to_image(edit_latents) - - # Currently, in order to apply StyleFlow editings, one should run inference, - # save the latent codes and load them from the official StyleFlow repository. - # def apply_styleflow(self): - # pass - - def _latents_to_image(self, latents): - with torch.no_grad(): - images, _ = self.generator([latents], randomize_noise=False, input_is_latent=True) - if self.is_cars: - images = images[:, :, 64:448, :] # 512x512 -> 384x512 - horizontal_concat_image = torch.cat(list(images), 2) - final_image = tensor2im(horizontal_concat_image) - return final_image diff --git a/spaces/akhaliq/ArcaneGAN-blocks/app.py b/spaces/akhaliq/ArcaneGAN-blocks/app.py deleted file mode 100644 index 98a1af5f1cc61673107fbb7896f2c055c822e066..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/ArcaneGAN-blocks/app.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -os.system("pip freeze") -from huggingface_hub import hf_hub_download -os.system("pip -qq install facenet_pytorch") -from facenet_pytorch import MTCNN -from torchvision import transforms -import torch, PIL -from tqdm.notebook import tqdm -import gradio as gr - -modelarcanev4 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.4", filename="ArcaneGANv0.4.jit") -modelarcanev3 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.3", filename="ArcaneGANv0.3.jit") -modelarcanev2 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.2", filename="ArcaneGANv0.2.jit") - - -mtcnn = MTCNN(image_size=256, margin=80) - -# simplest ye olde trustworthy MTCNN for face detection with landmarks -def detect(img): - - # Detect faces - batch_boxes, batch_probs, batch_points = mtcnn.detect(img, landmarks=True) - # Select faces - if not mtcnn.keep_all: - batch_boxes, batch_probs, batch_points = mtcnn.select_boxes( - batch_boxes, batch_probs, batch_points, img, method=mtcnn.selection_method - ) - - return batch_boxes, batch_points - -# my version of isOdd, should make a separate repo for it :D -def makeEven(_x): - return _x if (_x % 2 == 0) else _x+1 - -# the actual scaler function -def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False): - - x, y = _img.size - - ratio = 2 #initial ratio - - #scale to desired face size - if (boxes is not None): - if len(boxes)>0: - ratio = target_face/max(boxes[0][2:]-boxes[0][:2]); - ratio = min(ratio, max_upscale) - if VERBOSE: print('up by', ratio) - - if fixed_ratio>0: - if VERBOSE: print('fixed ratio') - ratio = fixed_ratio - - x*=ratio - y*=ratio - - #downscale to fit into max res - res = x*y - if res > max_res: - ratio = pow(res/max_res,1/2); - if VERBOSE: print(ratio) - x=int(x/ratio) - y=int(y/ratio) - - #make dimensions even, because usually NNs fail on uneven dimensions due to skip connection size mismatch - x = makeEven(int(x)) - y = makeEven(int(y)) - - size = (x, y) - - return _img.resize(size) - -""" - A useful scaler algorithm, based on face detection. - Takes PIL.Image, returns a uniformly scaled PIL.Image - boxes: a list of detected bboxes - _img: PIL.Image - max_res: maximum pixel area to fit into. Use to stay below the VRAM limits of your GPU.
- target_face: desired face size. Upscale or downscale the whole image to fit the detected face into that dimension. - fixed_ratio: fixed scale. Ignores the face size, but doesn't ignore the max_res limit. - max_upscale: maximum upscale ratio. Prevents from scaling images with tiny faces to a blurry mess. -""" - -def scale_by_face_size(_img, max_res=1_500_000, target_face=256, fix_ratio=0, max_upscale=2, VERBOSE=False): - boxes = None - boxes, _ = detect(_img) - if VERBOSE: print('boxes',boxes) - img_resized = scale(boxes, _img, max_res, target_face, fix_ratio, max_upscale, VERBOSE) - return img_resized - - -size = 256 - -means = [0.485, 0.456, 0.406] -stds = [0.229, 0.224, 0.225] - -t_stds = torch.tensor(stds).cpu().half().float()[:,None,None] -t_means = torch.tensor(means).cpu().half().float()[:,None,None] - -def makeEven(_x): - return int(_x) if (_x % 2 == 0) else int(_x+1) - -img_transforms = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(means,stds)]) - -def tensor2im(var): - return var.mul(t_stds).add(t_means).mul(255.).clamp(0,255).permute(1,2,0) - -def proc_pil_img(input_image, model): - transformed_image = img_transforms(input_image)[None,...].cpu().half().float() - - with torch.no_grad(): - result_image = model(transformed_image)[0] - output_image = tensor2im(result_image) - output_image = output_image.detach().cpu().numpy().astype('uint8') - output_image = PIL.Image.fromarray(output_image) - return output_image - - - -modelv4 = torch.jit.load(modelarcanev4,map_location='cpu').eval().cpu().half().float() -modelv3 = torch.jit.load(modelarcanev3,map_location='cpu').eval().cpu().half().float() -modelv2 = torch.jit.load(modelarcanev2,map_location='cpu').eval().cpu().half().float() - -def version4(im): - im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1) - res = proc_pil_img(im, modelv4) - return res - -def version3(im): - im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1) - res = proc_pil_img(im, modelv3) - return res - -def version2(im): - im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1) - res = proc_pil_img(im, modelv2) - return res - -block = gr.Blocks() - -with block: - gr.Markdown("Gradio Demo for ArcaneGAN, portrait to Arcane style. To use it, simply upload your image. Try out the different versions by clicking on the tabs. 
Please use a cropped portrait picture for best results.") - - with gr.Tab("version four"): - - with gr.Row(): - facepaint4 = gr.inputs.Image(type="pil",shape=(512,512)) - faceout4 = gr.outputs.Image(type="pil") - face_run = gr.Button("Run") - face_run.click(version4, inputs=facepaint4, outputs=faceout4) - - with gr.Tab("version three"): - with gr.Row(): - facepaint3 = gr.inputs.Image(type="pil") - faceout3 = gr.outputs.Image(type="pil") - face_run = gr.Button("Run") - face_run.click(version3, inputs=facepaint3, outputs=faceout3) - with gr.Tab("version two"): - with gr.Row(): - facepaint2 = gr.inputs.Image(type="pil") - faceout2 = gr.outputs.Image(type="pil") - face_run = gr.Button("Run") - face_run.click(version2, inputs=facepaint2, outputs=faceout2) - -block.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/akhaliq/Kapao/utils/augmentations.py b/spaces/akhaliq/Kapao/utils/augmentations.py deleted file mode 100644 index 8e7f34b5ac7fce07c84d3321469d1e0eb712843b..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Kapao/utils/augmentations.py +++ /dev/null @@ -1,321 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Image augmentation functions -""" - -import logging -import math -import random - -import cv2 -import numpy as np - -from utils.general import colorstr, segment2box, resample_segments, check_version -from utils.metrics import bbox_ioa - - -class Albumentations: - # YOLOv5 Albumentations class (optional, only used if package is installed) - def __init__(self): - self.transform = None - try: - import albumentations as A - check_version(A.__version__, '1.0.3') # version requirement - - self.transform = A.Compose([ - A.Blur(p=0.1), - A.MedianBlur(p=0.1), - A.ToGray(p=0.01)], - bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) - - logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) - except ImportError: # package not installed, skip - pass - except Exception as e: - logging.info(colorstr('albumentations: ') + f'{e}') - - def __call__(self, im, labels, p=1.0): - if self.transform and random.random() < p: - new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed - im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) - return im, labels - - -def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): - # HSV color-space augmentation - if hgain or sgain or vgain: - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - dtype = im.dtype # uint8 - - x = np.arange(0, 256, dtype=r.dtype) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed - - -def hist_equalize(im, clahe=True, bgr=False): - # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - if clahe: - c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - else: - yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - - -def replicate(im, labels): 
- # Replicate labels - h, w = im.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] - labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return im, labels - - -def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = im.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better val mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return im, ratio, (dw, dh) - - -def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, - border=(0, 0), kp_bbox=None): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = im.shape[0] + border[0] * 2 # shape(h,w,c) - width = im.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -im.shape[1] / 2 # x translation (pixels) - C[1, 2] = -im.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] 
!= 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) - else: # affine - im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - use_segments = any(x.any() for x in segments) - new = np.zeros((n, 4)) - if use_segments: # warp segments - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - - else: # warp boxes - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) - targets = targets[i] - targets[:, 1:5] = new[i] - - n = len(targets) - if n and targets.shape[1] > 5: - # warp keypoints in person object - person_mask = targets[:, 0] == 0 - person_targets = targets[person_mask] - if len(person_targets) > 0: - xy = person_targets[:, 5:].reshape(-1, 3) - vis = xy[:, 2:].copy() - xy[:, 2:] = 1 - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine - out_mask = ( - (xy[:, 0] < 0) | - (xy[:, 1] < 0) | - (xy[:, 0] > width) | - (xy[:, 1] > height) - ) - vis[out_mask] = 0 - keypoints = np.concatenate((xy, vis), axis=-1) - targets[person_mask, 5:] = keypoints.reshape(person_targets.shape[0], -1) - - # resize keypoint bbox sizes back to original - if n and kp_bbox is not None: - for i in range(int(targets[:, 0].max()) + 1): - if i > 0: - if isinstance(kp_bbox, list): - kp_bbox_i = kp_bbox[i - 1] - else: - kp_bbox_i = kp_bbox - - kp_mask = targets[:, 0] == i - kp_targets = targets[kp_mask] - - xc = kp_targets[:, [1, 3]].mean(axis=-1) - yc = kp_targets[:, [2, 4]].mean(axis=-1) - - kp_targets[:, 1] = xc - (kp_bbox_i * width) / 2 - kp_targets[:, 2] = yc - (kp_bbox_i * height) / 2 - kp_targets[:, 3] = xc + (kp_bbox_i * width) / 2 - kp_targets[:, 4] = yc + (kp_bbox_i * height) / 2 - - targets[kp_mask] = kp_targets - - # clip - targets[:, [1, 3]] = targets[:, [1, 3]].clip(0, width) - targets[:, [2, 4]] = targets[:, [2, 4]].clip(0, height) - - return im, targets - - -def copy_paste(im, labels, segments, p=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - if p and n: - h, w, c = im.shape # height, width, channels - im_new = np.zeros(im.shape, np.uint8) - for j in random.sample(range(n), k=round(p * n)): - l, s = labels[j], segments[j] - box = w - l[3], l[2], w - l[1], l[4] - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection 
over area - if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - labels = np.concatenate((labels, [[l[0], *box]]), 0) - segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=im, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug - - return im, labels, segments - - -def cutout(im, labels, p=0.5): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - if random.random() < p: - h, w = im.shape[:2] - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) # create random masks - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - -def mixup(im, labels, im2, labels2): - # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - im = (im * r + im2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - return im, labels - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates diff --git a/spaces/akhaliq/Mask2Former/tools/README.md b/spaces/akhaliq/Mask2Former/tools/README.md deleted file mode 100644 index 309f767b8f3ae10208107e72032f64d18f429a8c..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Mask2Former/tools/README.md +++ /dev/null @@ -1,79 +0,0 @@ -This directory contains a few tools for MaskFormer. - -* `convert-torchvision-to-d2.py` - -Tool to convert torchvision pre-trained weights for D2. - -``` -wget https://download.pytorch.org/models/resnet101-63fe2227.pth -python tools/convert-torchvision-to-d2.py resnet101-63fe2227.pth R-101.pkl -``` - -* `convert-pretrained-swin-model-to-d2.py` - -Tool to convert Swin Transformer pre-trained weights for D2.
- -``` -pip install timm - -wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth -python tools/convert-pretrained-swin-model-to-d2.py swin_tiny_patch4_window7_224.pth swin_tiny_patch4_window7_224.pkl - -wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth -python tools/convert-pretrained-swin-model-to-d2.py swin_small_patch4_window7_224.pth swin_small_patch4_window7_224.pkl - -wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth -python tools/convert-pretrained-swin-model-to-d2.py swin_base_patch4_window12_384_22k.pth swin_base_patch4_window12_384_22k.pkl - -wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth -python tools/convert-pretrained-swin-model-to-d2.py swin_large_patch4_window12_384_22k.pth swin_large_patch4_window12_384_22k.pkl -``` - -* `evaluate_pq_for_semantic_segmentation.py` - -Tool to evaluate PQ (PQ-stuff) for semantic segmentation predictions. - -Usage: - -``` -python tools/evaluate_pq_for_semantic_segmentation.py --dataset-name ade20k_sem_seg_val --json-file OUTPUT_DIR/inference/sem_seg_predictions.json -``` - -where `OUTPUT_DIR` is set in the config file. - -* `evaluate_coco_boundary_ap.py` - -Tool to evaluate Boundary AP for instance segmentation predictions. - -Usage: - -``` -python tools/evaluate_coco_boundary_ap.py --gt-json-file COCO_GT_JSON --dt-json-file COCO_DT_JSON -``` - -To install Boundary IoU API, run: - -``` -pip install git+https://github.com/bowenc0221/boundary-iou-api.git -``` - -* `analyze_model.py` - -Tool to analyze model parameters and flops. - -Usage for semantic segmentation (ADE20K only, use with caution!): - -``` -python tools/analyze_model.py --num-inputs 1 --tasks flop --use-fixed-input-size --config-file CONFIG_FILE -``` - -Note that, for semantic segmentation (ADE20K only), we use a dummy image with a fixed size equal to `cfg.INPUT.CROP.SIZE[0] x cfg.INPUT.CROP.SIZE[0]`. -Please do not use `--use-fixed-input-size` for calculating FLOPs on other datasets like Cityscapes! - -Usage for panoptic and instance segmentation: - -``` -python tools/analyze_model.py --num-inputs 100 --tasks flop --config-file CONFIG_FILE -``` - -Note that, for panoptic and instance segmentation, we compute the average flops over 100 real validation images. diff --git a/spaces/akhaliq/PAMA/README.md b/spaces/akhaliq/PAMA/README.md deleted file mode 100644 index 3da9b444908893bd12bd79a234a2da22aa0cc91a..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/PAMA/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: PAMA -emoji: 💻 -colorFrom: purple -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version`: _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/akhaliq/Real-ESRGAN/FAQ.md b/spaces/akhaliq/Real-ESRGAN/FAQ.md deleted file mode 100644 index caa8c08cfe4302eb8812c823569e8a0be30fa49c..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Real-ESRGAN/FAQ.md +++ /dev/null @@ -1,9 +0,0 @@ -# FAQ - -1. **What is the difference between `--netscale` and `--outscale`?** - -A: TODO. - -1. **How do I select models?** - -A: TODO. diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/jnas/voc1/cmd.sh b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/jnas/voc1/cmd.sh deleted file mode 100644 index 19f342102fc4f3389157c48f1196b16b68eb1cf1..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/jnas/voc1/cmd.sh +++ /dev/null @@ -1,91 +0,0 @@ -# ====== About run.pl, queue.pl, slurm.pl, and ssh.pl ====== -# Usage: <cmd>.pl [options] JOB=1:<nj> <log> <command...> -# e.g. -# run.pl --mem 4G JOB=1:10 echo.JOB.log echo JOB -# -# Options: -# --time